Build Logs

softwaremill/ox • 3.8.0:2026-01-13

Errors: 231
Warnings: 473
Total Lines: 11240

1##################################
2Cloning https://github.com/softwaremill/ox.git into /build/repo using revision v1.0.2
3##################################
4Note: switching to '9cb2ebc1df12e84198f24d8429f0ed135892788e'.
5
6You are in 'detached HEAD' state. You can look around, make experimental
7changes and commit them, and you can discard any commits you make in this
8state without impacting any branches by switching back to a branch.
9
10If you want to create a new branch to retain commits you create, you may
11do so (now or later) by using -c with the switch command. Example:
12
13 git switch -c <new-branch-name>
14
15Or undo this operation with:
16
17 git switch -
18
19Turn off this advice by setting config variable advice.detachedHead to false
20
21Using target Scala version for migration: 3.7.4
22Migrating project for -source:3.7 using Scala 3.7.4
23----
24Preparing build for 3.7.4
25Would try to apply common scalacOption (best-effort, sbt/mill only):
26Append: -rewrite,REQUIRE:-source:3.7-migration
27Remove: -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
28----
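
A note on the option mapping above: in plain sbt terms it amounts to ordinary scalacOptions manipulation. A minimal sketch, assuming a vanilla build.sbt (the REQUIRE:/MATCH: prefixes are the harness's own selector syntax, not compiler flags):

    // build.sbt -- illustrative sketch, not the community-build plugin itself
    // Append the migration flags; -rewrite lets the compiler patch sources in place.
    Compile / scalacOptions ++= Seq("-source:3.7-migration", "-rewrite")
    // Drop options that would promote the migration warnings to errors.
    Compile / scalacOptions ~= (_.filterNot(Set(
      "-indent", "-no-indent", "-new-syntax",
      "-deprecation", "-feature", "-Xfatal-warnings", "-Werror"
    )))
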
29Starting build for 3.7.4
30Execute tests: false
31sbt project found:
32No prepare script found for project softwaremill/ox
33##################################
34Scala version: 3.7.4
35Targets: com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context
36Project projectConfig: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}
37##################################
38Using extra scalacOptions: -rewrite,REQUIRE:-source:3.7-migration
39Filtering out scalacOptions: -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
40[sbt_options] declare -a sbt_options=()
41[process_args] java_version = '21'
42[copyRt] java9_rt = '/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21/rt.jar'
43# Executing command line:
44java
45-Dfile.encoding=UTF-8
46-Dcommunitybuild.scala=3.7.4
47-Dcommunitybuild.project.dependencies.add=
48-Xmx7G
49-Xms4G
50-Xss8M
51-Dsbt.script=/root/.sdkman/candidates/sbt/current/bin/sbt
52-Dscala.ext.dirs=/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21
53-jar
54/root/.sdkman/candidates/sbt/1.11.5/bin/sbt-launch.jar
55"setCrossScalaVersions 3.7.4"
56"++3.7.4 -v"
57"mapScalacOptions "-rewrite,REQUIRE:-source:3.7-migration,-Wconf:msg=can be rewritten automatically under:s" "-indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e""
58"set every credentials := Nil"
59"excludeLibraryDependency com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}"
60"removeScalacOptionsStartingWith -P:wartremover"
61
62moduleMappings
63"runBuild 3.7.4 """{"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}""" com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context"
64
65[info] [launcher] getting org.scala-sbt sbt 1.11.7 (this may take some time)...
66[info] welcome to sbt 1.11.7 (Eclipse Adoptium Java 21)
67[info] loading settings for project repo-build from akka.sbt, plugins.sbt...
68[info] loading project definition from /build/repo/project
69[info] compiling 2 Scala sources to /build/repo/project/target/scala-2.12/sbt-1.0/classes ...
70[info] Non-compiled module 'compiler-bridge_2.12' for Scala 2.12.20. Compiling...
71[info] Compilation completed in 8.378s.
72[info] done compiling
73[info] loading settings for project rootProject from build.sbt...
74[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
75[info] set current project to ox (in build file:/build/repo/)
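
The repeated MiMa message reflects the build's binary-compatibility setup: for anything that is not a milestone (M) or release candidate (RC), the previous release is used as the comparison baseline. A sketch of that selection with sbt-mima-plugin; the exact predicate in ox's build may differ:

    // build.sbt -- sketch of the version-based baseline selection (assumed logic)
    mimaPreviousArtifacts := {
      val v = version.value
      if (v.contains("-M") || v.contains("-RC")) Set.empty
      else Set(organization.value %% moduleName.value % "1.0.1")
    }
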
76Execute setCrossScalaVersions: 3.7.4
77OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in core/crossScalaVersions
78OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in core/crossScalaVersions
79OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in cron/crossScalaVersions
80OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in cron/crossScalaVersions
81OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in documentation/crossScalaVersions
82OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in documentation/crossScalaVersions
83OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in rootProject/crossScalaVersions
84OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in rootProject/crossScalaVersions
85OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in kafka/crossScalaVersions
86OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in kafka/crossScalaVersions
87OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in otelContext/crossScalaVersions
88OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in otelContext/crossScalaVersions
89OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in mdcLogback/crossScalaVersions
90OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in mdcLogback/crossScalaVersions
91OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in flowReactiveStreams/crossScalaVersions
92OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in flowReactiveStreams/crossScalaVersions
93[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
94[info] set current project to ox (in build file:/build/repo/)
95[info] Setting Scala version to 3.7.4 on 8 projects.
96[info] Switching Scala version on:
97[info] flowReactiveStreams (3.7.4)
98[info] documentation (3.7.4)
99[info] cron (3.7.4)
100[info] * rootProject (3.7.4)
101[info] mdcLogback (3.7.4)
102[info] kafka (3.7.4)
103[info] core (3.7.4)
104[info] otelContext (3.7.4)
105[info] Excluding projects:
106[info] Reapplying settings...
107[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
108[info] set current project to ox (in build file:/build/repo/)
109Execute mapScalacOptions: -rewrite,REQUIRE:-source:3.7-migration,-Wconf:msg=can be rewritten automatically under:s -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
110[info] Reapplying settings...
111[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
112[info] set current project to ox (in build file:/build/repo/)
113[info] Defining Global / credentials, core / credentials and 6 others.
114[info] The new values will be used by Compile / scalafmtOnly, Global / pgpSelectPassphrase and 63 others.
115[info] Run `last` for details.
116[info] Reapplying settings...
117[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
118[info] set current project to ox (in build file:/build/repo/)
119Execute excludeLibraryDependency: com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}
120[info] Reapplying settings...
121OpenCB::Failed to reapply settings in excludeLibraryDependency: Reference to undefined setting:
122
123 Global / allExcludeDependencies from Global / allExcludeDependencies (CommunityBuildPlugin.scala:331)
124 Did you mean flowReactiveStreams / allExcludeDependencies ?
125 , retry without global scopes
126[info] Reapplying settings...
127[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
128[info] set current project to ox (in build file:/build/repo/)
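
The excludeLibraryDependency failure above is benign: allExcludeDependencies is an sbt key defined at project scope, so a reference in Global scope finds no definition (hence the "Did you mean flowReactiveStreams / allExcludeDependencies?" hint), and the plugin retries per project. The standard per-project form of such an exclusion looks like this; the module below is one of the compiler plugins named in the command:

    // build.sbt -- per-project exclusion, the scope the retry falls back to
    excludeDependencies += ExclusionRule("org.wartremover", "wartremover_3")
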
129Execute removeScalacOptionsStartingWith: -P:wartremover
130[info] Reapplying settings...
131[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
132[info] set current project to ox (in build file:/build/repo/)
133[success] Total time: 0 s, completed Jan 13, 2026, 3:16:10 PM
134Build config: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}
135Parsed config: Success(ProjectBuildConfig(ProjectsConfig(List(),Map()),CompileOnly,List()))
136Starting build...
137Projects: Set(flowReactiveStreams, cron, mdcLogback, kafka, core, otelContext)
138Starting build for ProjectRef(file:/build/repo/,flowReactiveStreams) (flow-reactive-streams)... [0/6]
139OpenCB::Exclude Scala3 specific scalacOption `-rewrite` in Scala 2.12.20 module Global
140OpenCB::Exclude Scala3 specific scalacOption `REQUIRE:-source:3.7-migration` in Scala 2.12.20 module Global
141OpenCB::Filter out '-deprecation', matches setting pattern '^-?-deprecation'
142OpenCB::Filter out '-feature', matches setting pattern '^-?-feature'
143Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
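
Note the injected -Wconf entry: it silences any diagnostic whose message matches the given text, so the "can be rewritten automatically" hints produced under -source:3.7-migration do not inflate the warning count. The general shape of the flag, for reference:

    // -Wconf takes comma-separated <filter>:<action> pairs; msg= matches the
    // message text (as a regex), and :s means "silent" (other actions: e, w, i).
    scalacOptions += "-Wconf:msg=can be rewritten automatically under:s"
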
144[info] compiling 57 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
145[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:187:27
146[warn] 187 | def fromArray[A: ClassTag](array: Array[A]): Chunk[A] =
147[warn] | ^
148[warn] | unused implicit parameter
149[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:190:28
150[warn] 190 | def fromIArray[A: ClassTag](array: IArray[A]): Chunk[A] =
151[warn] | ^
152[warn] | unused implicit parameter
153[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:35:45
154[warn] 35 | def addSuppressedException[T](error: F[T], e: Throwable): F[T] = error
155[warn] | ^
156[warn] | unused explicit parameter
157[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:40:41
158[warn] 40 | def addSuppressedError[T](error: F[T], e: E): F[T] = error
159[warn] | ^
160[warn] | unused explicit parameter
161[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/channels/SourceOps.scala:5:12
162[warn] 5 |import java.util
163[warn] | ^^^^
164[warn] | unused import
165[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
166[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
167[warn] | ^^^^
168[warn] | unused explicit parameter
169[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:5:10
170[warn] 5 |import ox.Ox
171[warn] | ^^
172[warn] | unused import
173[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:616:8
174[warn] 616 | tap(t => sleep(emitEveryMillis))
175[warn] | ^
176[warn] | unused explicit parameter
177[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:947:53
178[warn] 947 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
179[warn] | ^^^^
180[warn] | unused explicit parameter
181[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowReactiveOps.scala:11:10
182[warn] 11 |import ox.fork
183[warn] | ^^^^
184[warn] | unused import
185[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowTextOps.scala:149:60
186[warn] 149 | def processByteOrderMark(bytes: T, buffer: Chunk[Byte], output: FlowEmit[String]): (Chunk[Byte], State) =
187[warn] | ^^^^^^
188[warn] | unused explicit parameter
189[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
190[warn] 123 | private var successCalls = 0
191[warn] | ^^^^^^^^^^^^
192[warn] | private variable was mutated but not read
193[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
194[warn] 167 | private var successCalls = 0
195[warn] | ^^^^^^^^^^^^
196[warn] | private variable was mutated but not read
197[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:414:15
198[warn] 414 | case Nested(t) =>
199[warn] | ^
200[warn] |the type test for Nested cannot be checked at runtime because it's a local class
201[warn] |
202[warn] | longer explanation available when compiling with `-explain`
203[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
204[warn] 150 | case FromParent(t) =>
205[warn] | ^
206[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
207[warn] |
208[warn] | longer explanation available when compiling with `-explain`
209[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
210[warn] 154 | case ChildDone(v) =>
211[warn] | ^
212[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
213[warn] |
214[warn] | longer explanation available when compiling with `-explain`
215[info] [patched file /build/repo/core/src/main/scala/ox/flow/FlowOps.scala]
216[info] [patched file /build/repo/core/src/main/scala/ox/channels/SourceOps.scala]
217[info] [patched file /build/repo/core/src/main/scala/ox/flow/FlowReactiveOps.scala]
218[info] [patched file /build/repo/core/src/main/scala/ox/oxThreadFactory.scala]
219[info] [patched file /build/repo/core/src/main/scala/ox/local.scala]
220[warn] 16 warnings found
221[info] done compiling
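
The E198 warnings above share one pattern: parameters or context-bound evidence that the body never consumes, flagged by the -Wunused:* options in effect. A minimal reproduction of the Chunk.fromArray case (the body here is a stand-in):

    import scala.reflect.ClassTag

    // The [A: ClassTag] context bound desugars to an implicit parameter; since
    // the body never uses it, -Wunused:implicits reports E198.
    def fromArray[A: ClassTag](array: Array[A]): List[A] =
      array.toList
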
222[info] compiling 1 Scala source to /build/repo/flow-reactive-streams/target/scala-3.7.4/classes ...
223[info] done compiling
224[info] compiling 5 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
225[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:615:8
226[warn] 615 | tap(t => sleep(emitEveryMillis))
227[warn] | ^
228[warn] | unused explicit parameter
229[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:946:53
230[warn] 946 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
231[warn] | ^^^^
232[warn] | unused explicit parameter
233[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:413:15
234[warn] 413 | case Nested(t) =>
235[warn] | ^
236[warn] |the type test for Nested cannot be checked at runtime because it's a local class
237[warn] |
238[warn] | longer explanation available when compiling with `-explain`
239[warn] three warnings found
240[info] done compiling
241[info] compiling 1 Scala source to /build/repo/core/target/scala-3.7.4/classes ...
242[warn] three warnings found
243[info] done compiling
244[info] compiling 25 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
245[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
246[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
247[warn] | ^^^^
248[warn] | unused explicit parameter
249[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
250[warn] 123 | private var successCalls = 0
251[warn] | ^^^^^^^^^^^^
252[warn] | private variable was mutated but not read
253[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
254[warn] 167 | private var successCalls = 0
255[warn] | ^^^^^^^^^^^^
256[warn] | private variable was mutated but not read
257[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
258[warn] 150 | case FromParent(t) =>
259[warn] | ^
260[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
261[warn] |
262[warn] | longer explanation available when compiling with `-explain`
263[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
264[warn] 154 | case ChildDone(v) =>
265[warn] | ^
266[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
267[warn] |
268[warn] | longer explanation available when compiling with `-explain`
269[warn] 8 warnings found
270[info] done compiling
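
The E092 warnings concern classes defined locally, inside a method: after erasure a JVM instanceof test cannot tell apart instances created by different invocations of the enclosing method, so the compiler treats the type test as unchecked. A minimal sketch that should trigger the same warning:

    def roundTrip(): Unit =
      case class Nested(t: Int) // local class: its runtime type test is unsound
      val boxed: Any = Nested(1)
      boxed match
        case Nested(t) => println(t) // E092: type test cannot be checked at runtime
        case _         => ()
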
271Starting build for ProjectRef(file:/build/repo/,mdcLogback) (mdc-logback)... [1/6]
272Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
273[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.7.4/classes ...
274[info] done compiling
275[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.7.4/test-classes ...
276[info] done compiling
277Starting build for ProjectRef(file:/build/repo/,core) (core)... [2/6]
278Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
279[info] compiling 112 Scala sources to /build/repo/core/target/scala-3.7.4/test-classes ...
280[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
281[warn] 43 | def transformation(i: Int) =
282[warn] | ^
283[warn] | unused explicit parameter
284[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ForeachParTest.scala:38:23
285[warn] 38 | def transformation(i: Int) =
286[warn] | ^
287[warn] | unused explicit parameter
288[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
289[warn] 43 | def transformation(i: Int) =
290[warn] | ^
291[warn] | unused explicit parameter
292[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:9:27
293[warn] 9 |import scala.util.boundary.*
294[warn] | ^
295[warn] | unused import
296[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:13:29
297[warn] 13 |import java.util.concurrent.{Semaphore, TimeUnit}
298[warn] | ^^^^^^^^^
299[warn] | unused import
300[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:13:40
301[warn] 13 |import java.util.concurrent.{Semaphore, TimeUnit}
302[warn] | ^^^^^^^^
303[warn] | unused import
304[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ParTest.scala:80:21
305[warn] 80 | (1 to 5).map(i =>
306[warn] | ^
307[warn] | unused explicit parameter
308[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:126:41
309[warn] 126 | use(new TestResource, _.release()) { r =>
310[warn] | ^
311[warn] | unused explicit parameter
312[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:140:37
313[warn] 140 | useCloseable(new TestResource) { r =>
314[warn] | ^
315[warn] | unused explicit parameter
316[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:157:43
317[warn] 157 | use(new TestResource, _.release()) { r =>
318[warn] | ^
319[warn] | unused explicit parameter
320[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala:3:43
321[warn] 3 |import org.scalatest.concurrent.Eventually.*
322[warn] | ^
323[warn] | unused import
324[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:3:43
325[warn] 3 |import org.scalatest.concurrent.Eventually.*
326[warn] | ^
327[warn] | unused import
328[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:387:44
329[warn] 387 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
330[warn] | ^
331[warn] | unused implicit parameter
332[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsCollectTest.scala:5:10
333[warn] 5 |import ox.*
334[warn] | ^
335[warn] | unused import
336[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsConcatPrependTest.scala:5:10
337[warn] 5 |import ox.*
338[warn] | ^
339[warn] | unused import
340[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceByTest.scala:5:10
341[warn] 5 |import ox.*
342[warn] | ^
343[warn] | unused import
344[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceTest.scala:5:10
345[warn] 5 |import ox.*
346[warn] | ^
347[warn] | unused import
348[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDropTest.scala:5:10
349[warn] 5 |import ox.*
350[warn] | ^
351[warn] | unused import
352[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsEmptyTest.scala:5:10
353[warn] 5 |import ox.*
354[warn] | ^
355[warn] | unused import
356[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFilterTest.scala:5:10
357[warn] 5 |import ox.*
358[warn] | ^
359[warn] | unused import
360[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFlatMapTest.scala:5:10
361[warn] 5 |import ox.*
362[warn] | ^
363[warn] | unused import
364[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFlattenTest.scala:6:10
365[warn] 6 |import ox.*
366[warn] | ^
367[warn] | unused import
368[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFoldTest.scala:5:10
369[warn] 5 |import ox.*
370[warn] | ^
371[warn] | unused import
372[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsForeachTest.scala:5:10
373[warn] 5 |import ox.*
374[warn] | ^
375[warn] | unused import
376[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFutureTest.scala:5:10
377[warn] 5 |import ox.*
378[warn] | ^
379[warn] | unused import
380[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:16:27
381[warn] 16 | .groupBy(10, _ % 10)(v => f => f)
382[warn] | ^
383[warn] | unused explicit parameter
384[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:22:44
385[warn] 22 | Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
386[warn] | ^
387[warn] | unused explicit parameter
388[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:28:68
389[warn] 28 | for i <- 1 to 100000 do Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
390[warn] | ^
391[warn] | unused explicit parameter
392[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:109:26
393[warn] 109 | .groupBy(1, _ => 0)(v => _.tap(_ => sleep(10.millis)))
394[warn] | ^
395[warn] | unused explicit parameter
396[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:119:31
397[warn] 119 | .groupBy(10, _ % 10)(v => f => f.tap(i => if i == 13 then throw new RuntimeException("boom!")))
398[warn] | ^
399[warn] | unused explicit parameter
400[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:131:30
401[warn] 131 | .groupBy(1, _ => 0)(v => f => f.tap(_ => sleep(100.millis).tap(_ => throw new RuntimeException("boom!"))))
402[warn] | ^
403[warn] | unused explicit parameter
404[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:142:31
405[warn] 142 | .groupBy(10, _ % 10)(v => f => f)
406[warn] | ^
407[warn] | unused explicit parameter
408[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:153:29
409[warn] 153 | .groupBy(10, _ % 10)(v => f => f.take(1))
410[warn] | ^
411[warn] | unused explicit parameter
412[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsLastOptionTest.scala:6:10
413[warn] 6 |import ox.*
414[warn] | ^
415[warn] | unused import
416[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsLastTest.scala:5:10
417[warn] 5 |import ox.*
418[warn] | ^
419[warn] | unused import
420[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsMapTest.scala:5:10
421[warn] 5 |import ox.*
422[warn] | ^
423[warn] | unused import
424[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsMapUsingSinkTest.scala:5:10
425[warn] 5 |import ox.*
426[warn] | ^
427[warn] | unused import
428[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsReduceTest.scala:5:10
429[warn] 5 |import ox.*
430[warn] | ^
431[warn] | unused import
432[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSampleTest.scala:5:10
433[warn] 5 |import ox.*
434[warn] | ^
435[warn] | unused import
436[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsScanTest.scala:5:10
437[warn] 5 |import ox.*
438[warn] | ^
439[warn] | unused import
440[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitOnTest.scala:5:10
441[warn] 5 |import ox.*
442[warn] | ^
443[warn] | unused import
444[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitTest.scala:5:10
445[warn] 5 |import ox.*
446[warn] | ^
447[warn] | unused import
448[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsTakeWhileTest.scala:5:10
449[warn] 5 |import ox.*
450[warn] | ^
451[warn] | unused import
452[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsThrottleTest.scala:5:10
453[warn] 5 |import ox.*
454[warn] | ^
455[warn] | unused import
456[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsTimeoutTest.scala:6:10
457[warn] 6 |import ox.*
458[warn] | ^
459[warn] | unused import
460[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsUsingSink.scala:5:10
461[warn] 5 |import ox.*
462[warn] | ^
463[warn] | unused import
464[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsZipAllTest.scala:5:10
465[warn] 5 |import ox.*
466[warn] | ^
467[warn] | unused import
468[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:24:21
469[warn] 24 | def afterAttempt(attempt: Int, result: Either[Throwable, Int]): Unit =
470[warn] | ^^^^^^^
471[warn] | unused explicit parameter
472[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:50:21
473[warn] 50 | def afterAttempt(attempt: Int, result: Either[Throwable, Unit]): Unit =
474[warn] | ^^^^^^^
475[warn] | unused explicit parameter
476[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitTest.scala]
477[info] [patched file /build/repo/core/src/test/scala/ox/MapParTest.scala]
478[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala]
479[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSampleTest.scala]
480[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsCollectTest.scala]
481[info] [patched file /build/repo/core/src/test/scala/ox/FilterParTest.scala]
482[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceTest.scala]
483[info] [patched file /build/repo/core/src/test/scala/ox/OxAppTest.scala]
484[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsUsingSink.scala]
485[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFutureTest.scala]
486[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFlatMapTest.scala]
487[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFlattenTest.scala]
488[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitOnTest.scala]
489[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFoldTest.scala]
490[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDropTest.scala]
491[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsZipWithIndexTest.scala]
492[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsLastTest.scala]
493[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsScanTest.scala]
494[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsZipAllTest.scala]
495[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsLastOptionTest.scala]
496[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsConcatPrependTest.scala]
497[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsTimeoutTest.scala]
498[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsTakeWhileTest.scala]
499[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsForeachTest.scala]
500[info] [patched file /build/repo/core/src/test/scala/ox/CollectParTest.scala]
501[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsReduceTest.scala]
502[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFilterTest.scala]
503[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsThrottleTest.scala]
504[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala]
505[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsMapUsingSinkTest.scala]
506[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceByTest.scala]
507[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsMapTest.scala]
508[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsEmptyTest.scala]
509[warn] 49 warnings found
510[info] done compiling
511[info] compiling 33 Scala sources to /build/repo/core/target/scala-3.7.4/test-classes ...
512[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
513[warn] 43 | def transformation(i: Int) =
514[warn] | ^
515[warn] | unused explicit parameter
516[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
517[warn] 43 | def transformation(i: Int) =
518[warn] | ^
519[warn] | unused explicit parameter
520[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:386:44
521[warn] 386 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
522[warn] | ^
523[warn] | unused implicit parameter
524[warn] three warnings found
525[info] done compiling
526Starting build for ProjectRef(file:/build/repo/,cron) (cron)... [3/6]
527Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
528[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/classes ...
529[info] done compiling
530[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/test-classes ...
531[warn] -- [E198] Unused Symbol Warning: /build/repo/cron/src/test/scala/ox/scheduling/cron/CronScheduleTest.scala:7:33
532[warn] 7 |import scala.concurrent.duration.*
533[warn] | ^
534[warn] | unused import
535[info] [patched file /build/repo/cron/src/test/scala/ox/scheduling/cron/CronScheduleTest.scala]
536[warn] one warning found
537[info] done compiling
538[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/test-classes ...
539[info] done compiling
540Starting build for ProjectRef(file:/build/repo/,otelContext) (otel-context)... [4/6]
541Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
542[info] compiling 1 Scala source to /build/repo/otel-context/target/scala-3.7.4/classes ...
543[info] done compiling
544Starting build for ProjectRef(file:/build/repo/,kafka) (kafka)... [5/6]
545Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
546[info] compiling 9 Scala sources to /build/repo/kafka/target/scala-3.7.4/classes ...
547[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaConsumerWrapper.scala:45:14
548[warn] 45 | def close(wrapper: KafkaConsumerWrapper[K, V]): Unit = if closeWhenComplete then
549[warn] | ^^^^^^^
550[warn] | unused explicit parameter
551[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaFlow.scala:3:41
552[warn] 3 |import org.apache.kafka.clients.consumer.ConsumerRecord
553[warn] | ^^^^^^^^^^^^^^
554[warn] | unused import
555[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/kafkaOffsetCommit.scala]
556[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/KafkaFlow.scala]
557[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/KafkaStage.scala]
558[warn] two warnings found
559[info] done compiling
560[info] compiling 3 Scala sources to /build/repo/kafka/target/scala-3.7.4/classes ...
561[info] done compiling
562[info] compiling 6 Scala sources to /build/repo/kafka/target/scala-3.7.4/test-classes ...
563[info] [patched file /build/repo/kafka/src/test/scala/ox/kafka/KafkaTest.scala]
564[info] done compiling
565[info] compiling 1 Scala source to /build/repo/kafka/target/scala-3.7.4/test-classes ...
566[info] done compiling
567
568************************
569Build summary:
570[{
571 "module": "flow-reactive-streams",
572 "compile": {"status": "ok", "tookMs": 16732, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
573 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
574 "test-compile": {"status": "ok", "tookMs": 6469, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
575 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
576 "publish": {"status": "skipped", "tookMs": 0},
577 "metadata": {
578 "crossScalaVersions": ["2.12.20"]
579}
580},{
581 "module": "mdc-logback",
582 "compile": {"status": "ok", "tookMs": 508, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
583 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
584 "test-compile": {"status": "ok", "tookMs": 1076, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
585 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
586 "publish": {"status": "skipped", "tookMs": 0},
587 "metadata": {
588 "crossScalaVersions": ["2.12.20"]
589}
590},{
591 "module": "core",
592 "compile": {"status": "ok", "tookMs": 54, "warnings": 16, "errors": 0, "sourceVersion": "3.7-migration"},
593 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
594 "test-compile": {"status": "ok", "tookMs": 21666, "warnings": 49, "errors": 0, "sourceVersion": "3.7-migration"},
595 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
596 "publish": {"status": "skipped", "tookMs": 0},
597 "metadata": {
598 "crossScalaVersions": ["2.12.20"]
599}
600},{
601 "module": "cron",
602 "compile": {"status": "ok", "tookMs": 430, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
603 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
604 "test-compile": {"status": "ok", "tookMs": 634, "warnings": 1, "errors": 0, "sourceVersion": "3.7-migration"},
605 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
606 "publish": {"status": "skipped", "tookMs": 0},
607 "metadata": {
608 "crossScalaVersions": ["2.12.20"]
609}
610},{
611 "module": "otel-context",
612 "compile": {"status": "ok", "tookMs": 272, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
613 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
614 "test-compile": {"status": "ok", "tookMs": 199, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
615 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
616 "publish": {"status": "skipped", "tookMs": 0},
617 "metadata": {
618 "crossScalaVersions": ["2.12.20"]
619}
620},{
621 "module": "kafka",
622 "compile": {"status": "ok", "tookMs": 638, "warnings": 2, "errors": 0, "sourceVersion": "3.7-migration"},
623 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
624 "test-compile": {"status": "ok", "tookMs": 1727, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
625 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
626 "publish": {"status": "skipped", "tookMs": 0},
627 "metadata": {
628 "crossScalaVersions": ["2.12.20"]
629}
630}]
631************************
632[success] Total time: 63 s (0:01:03.0), completed Jan 13, 2026, 3:17:13 PM
633Checking patch project/plugins.sbt...
634Checking patch build.sbt...
635Applied patch project/plugins.sbt cleanly.
636Applied patch build.sbt cleanly.
637Commit migration rewrites
638Switched to a new branch 'opencb/migrate-source-3.7'
639[opencb/migrate-source-3.7 dec257e] Apply Scala compiler rewrites using -source:3.7-migration using Scala 3.7.4
640 43 files changed, 24 insertions(+), 60 deletions(-)
641----
642Preparing build for 3.8.0
643Scala binary version found: 3.8
644Implicitly using source version 3.8
645Scala binary version found: 3.8
646Implicitly using source version 3.8
647Would try to apply common scalacOption (best-effort, sbt/mill only):
648Append: ,REQUIRE:-source:3.8
649Remove: ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
650----
651Starting build for 3.8.0
652Execute tests: true
653sbt project found:
654No prepare script found for project softwaremill/ox
655##################################
656Scala version: 3.8.0
657Targets: com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context
658Project projectConfig: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}
659##################################
660Using extra scalacOptions: ,REQUIRE:-source:3.8
661Filtering out scalacOptions: ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
662[sbt_options] declare -a sbt_options=()
663[process_args] java_version = '21'
664[copyRt] java9_rt = '/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21/rt.jar'
665# Executing command line:
666java
667-Dfile.encoding=UTF-8
668-Dcommunitybuild.scala=3.8.0
669-Dcommunitybuild.project.dependencies.add=
670-Xmx7G
671-Xms4G
672-Xss8M
673-Dsbt.script=/root/.sdkman/candidates/sbt/current/bin/sbt
674-Dscala.ext.dirs=/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21
675-jar
676/root/.sdkman/candidates/sbt/1.11.5/bin/sbt-launch.jar
677"setCrossScalaVersions 3.8.0"
678"++3.8.0 -v"
679"mapScalacOptions ",REQUIRE:-source:3.8,-Wconf:msg=can be rewritten automatically under:s" ",-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e""
680"set every credentials := Nil"
681"excludeLibraryDependency com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}"
682"removeScalacOptionsStartingWith -P:wartremover"
683
684moduleMappings
685"runBuild 3.8.0 """{"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}""" com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context"
686
687[info] welcome to sbt 1.11.7 (Eclipse Adoptium Java 21)
688[info] loading settings for project repo-build from akka.sbt, plugins.sbt...
689[info] loading project definition from /build/repo/project
690[info] loading settings for project rootProject from build.sbt...
691[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
692[info] set current project to ox (in build file:/build/repo/)
693Execute setCrossScalaVersions: 3.8.0
694OpenCB::Changing crossVersion 3.3.7 -> 3.8.0 in core/crossScalaVersions
695OpenCB::Changing crossVersion 3.3.7 -> 3.8.0 in cron/crossScalaVersions
696OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0) in core/crossScalaVersions
697OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0) in cron/crossScalaVersions
698OpenCB::Changing crossVersion 3.3.7 -> 3.8.0 in mdcLogback/crossScalaVersions
699OpenCB::Changing crossVersion 3.3.7 -> 3.8.0 in documentation/crossScalaVersions
700OpenCB::Changing crossVersion 3.3.7 -> 3.8.0 in flowReactiveStreams/crossScalaVersions
701OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0) in documentation/crossScalaVersions
702OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0) in flowReactiveStreams/crossScalaVersions
703OpenCB::Changing crossVersion 3.3.7 -> 3.8.0 in rootProject/crossScalaVersions
704OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0) in rootProject/crossScalaVersions
705OpenCB::Changing crossVersion 3.3.7 -> 3.8.0 in otelContext/crossScalaVersions
706OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0) in otelContext/crossScalaVersions
707OpenCB::Changing crossVersion 3.3.7 -> 3.8.0 in kafka/crossScalaVersions
708OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0) in kafka/crossScalaVersions
709OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0) in mdcLogback/crossScalaVersions
710[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
711[info] set current project to ox (in build file:/build/repo/)
712[info] Setting Scala version to 3.8.0 on 8 projects.
713[info] Switching Scala version on:
714[info] flowReactiveStreams (3.8.0)
715[info] documentation (3.8.0)
716[info] cron (3.8.0)
717[info] * rootProject (3.8.0)
718[info] mdcLogback (3.8.0)
719[info] kafka (3.8.0)
720[info] core (3.8.0)
721[info] otelContext (3.8.0)
722[info] Excluding projects:
723[info] Reapplying settings...
724[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
725[info] set current project to ox (in build file:/build/repo/)
726Execute mapScalacOptions: ,REQUIRE:-source:3.8,-Wconf:msg=can be rewritten automatically under:s ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
727[info] Reapplying settings...
728[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
729[info] set current project to ox (in build file:/build/repo/)
730[info] Defining Global / credentials, core / credentials and 6 others.
731[info] The new values will be used by Compile / scalafmtOnly, Global / pgpSelectPassphrase and 63 others.
732[info] Run `last` for details.
733[info] Reapplying settings...
734[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
735[info] set current project to ox (in build file:/build/repo/)
736Execute excludeLibraryDependency: com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}
737[info] Reapplying settings...
738OpenCB::Failed to reapply settings in excludeLibraryDependency: Reference to undefined setting:
739
740 Global / allExcludeDependencies from Global / allExcludeDependencies (CommunityBuildPlugin.scala:331)
741 Did you mean flowReactiveStreams / allExcludeDependencies ?
742 , retry without global scopes
743[info] Reapplying settings...
744[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
745[info] set current project to ox (in build file:/build/repo/)
746Execute removeScalacOptionsStartingWith: -P:wartremover
747[info] Reapplying settings...
748[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
749[info] set current project to ox (in build file:/build/repo/)
750[success] Total time: 0 s, completed Jan 13, 2026, 3:17:25 PM
751Build config: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}
752Parsed config: Success(ProjectBuildConfig(ProjectsConfig(List(),Map()),Full,List()))
753Starting build...
754Projects: Set(flowReactiveStreams, cron, mdcLogback, kafka, core, otelContext)
755Starting build for ProjectRef(file:/build/repo/,flowReactiveStreams) (flow-reactive-streams)... [0/6]
756OpenCB::Exclude Scala3 specific scalacOption `REQUIRE:-source:3.8` in Scala 2.12.20 module Global
757OpenCB::Filter out '-deprecation', matches setting pattern '^-?-deprecation'
758OpenCB::Filter out '-feature', matches setting pattern '^-?-feature'
759Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
760[info] compiling 57 Scala sources to /build/repo/core/target/scala-3.8.0/classes ...
761[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:187:27
762[warn] 187 | def fromArray[A: ClassTag](array: Array[A]): Chunk[A] =
763[warn] | ^
764[warn] | unused implicit parameter
765[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:190:28
766[warn] 190 | def fromIArray[A: ClassTag](array: IArray[A]): Chunk[A] =
767[warn] | ^
768[warn] | unused implicit parameter
769[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:35:45
770[warn] 35 | def addSuppressedException[T](error: F[T], e: Throwable): F[T] = error
771[warn] | ^
772[warn] | unused explicit parameter
773[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:40:41
774[warn] 40 | def addSuppressedError[T](error: F[T], e: E): F[T] = error
775[warn] | ^
776[warn] | unused explicit parameter
777[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
778[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
779[warn] | ^^^^
780[warn] | unused explicit parameter
781[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:615:8
782[warn] 615 | tap(t => sleep(emitEveryMillis))
783[warn] | ^
784[warn] | unused explicit parameter
785[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:946:53
786[warn] 946 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
787[warn] | ^^^^
788[warn] | unused explicit parameter
789[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowTextOps.scala:149:60
790[warn] 149 | def processByteOrderMark(bytes: T, buffer: Chunk[Byte], output: FlowEmit[String]): (Chunk[Byte], State) =
791[warn] | ^^^^^^
792[warn] | unused explicit parameter
793[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
794[warn] 123 | private var successCalls = 0
795[warn] | ^^^^^^^^^^^^
796[warn] | private variable was mutated but not read
797[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
798[warn] 167 | private var successCalls = 0
799[warn] | ^^^^^^^^^^^^
800[warn] | private variable was mutated but not read
801[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:413:15
802[warn] 413 | case Nested(t) =>
803[warn] | ^
804[warn] |the type test for Nested cannot be checked at runtime because it's a local class
805[warn] |
806[warn] | longer explanation available when compiling with `-explain`
807[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
808[warn] 150 | case FromParent(t) =>
809[warn] | ^
810[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
811[warn] |
812[warn] | longer explanation available when compiling with `-explain`
813[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
814[warn] 154 | case ChildDone(v) =>
815[warn] | ^
816[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
817[warn] |
818[warn] | longer explanation available when compiling with `-explain`
819[warn] 13 warnings found
820[info] done compiling
821[info] compiling 1 Scala source to /build/repo/flow-reactive-streams/target/scala-3.8.0/classes ...
822[info] done compiling
823Starting build for ProjectRef(file:/build/repo/,mdcLogback) (mdc-logback)... [1/6]
824Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
825[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.8.0/classes ...
826[info] done compiling
827[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.8.0/test-classes ...
828[info] done compiling
82915:17:48.144 [pool-28-thread-6] INFO ox.logback.InheritableMDC$ -- Scoped-value based MDC initialized
830[info] InheritableMDCTest:
831[info] - should make MDC values available in forks
832Starting build for ProjectRef(file:/build/repo/,core) (core)... [2/6]
833Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
834[info] compiling 112 Scala sources to /build/repo/core/target/scala-3.8.0/test-classes ...
835[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
836[warn] 43 | def transformation(i: Int) =
837[warn] | ^
838[warn] | unused explicit parameter
839[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ForeachParTest.scala:38:23
840[warn] 38 | def transformation(i: Int) =
841[warn] | ^
842[warn] | unused explicit parameter
843[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
844[warn] 43 | def transformation(i: Int) =
845[warn] | ^
846[warn] | unused explicit parameter
847[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ParTest.scala:80:21
848[warn] 80 | (1 to 5).map(i =>
849[warn] | ^
850[warn] | unused explicit parameter
851[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:126:41
852[warn] 126 | use(new TestResource, _.release()) { r =>
853[warn] | ^
854[warn] | unused explicit parameter
855[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:140:37
856[warn] 140 | useCloseable(new TestResource) { r =>
857[warn] | ^
858[warn] | unused explicit parameter
859[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:157:43
860[warn] 157 | use(new TestResource, _.release()) { r =>
861[warn] | ^
862[warn] | unused explicit parameter
863[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala:5:19
864[warn] 5 |import ox.{timeout as _, *}
865[warn] | ^^^^^^^^^^^^
866[warn] | unused import
867[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:5:19
868[warn] 5 |import ox.{timeout as _, *}
869[warn] | ^^^^^^^^^^^^
870[warn] | unused import
871[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:386:44
872[warn] 386 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
873[warn] | ^
874[warn] | unused implicit parameter
875[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:16:27
876[warn] 16 | .groupBy(10, _ % 10)(v => f => f)
877[warn] | ^
878[warn] | unused explicit parameter
879[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:22:44
880[warn] 22 | Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
881[warn] | ^
882[warn] | unused explicit parameter
883[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:28:68
884[warn] 28 | for i <- 1 to 100000 do Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
885[warn] | ^
886[warn] | unused explicit parameter
887[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:109:26
888[warn] 109 | .groupBy(1, _ => 0)(v => _.tap(_ => sleep(10.millis)))
889[warn] | ^
890[warn] | unused explicit parameter
891[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:119:31
892[warn] 119 | .groupBy(10, _ % 10)(v => f => f.tap(i => if i == 13 then throw new RuntimeException("boom!")))
893[warn] | ^
894[warn] | unused explicit parameter
895[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:131:30
896[warn] 131 | .groupBy(1, _ => 0)(v => f => f.tap(_ => sleep(100.millis).tap(_ => throw new RuntimeException("boom!"))))
897[warn] | ^
898[warn] | unused explicit parameter
899[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:142:31
900[warn] 142 | .groupBy(10, _ % 10)(v => f => f)
901[warn] | ^
902[warn] | unused explicit parameter
903[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:153:29
904[warn] 153 | .groupBy(10, _ % 10)(v => f => f.take(1))
905[warn] | ^
906[warn] | unused explicit parameter
907[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:24:21
908[warn] 24 | def afterAttempt(attempt: Int, result: Either[Throwable, Int]): Unit =
909[warn] | ^^^^^^^
910[warn] | unused explicit parameter
911[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:50:21
912[warn] 50 | def afterAttempt(attempt: Int, result: Either[Throwable, Unit]): Unit =
913[warn] | ^^^^^^^
914[warn] | unused explicit parameter
915[warn] 20 warnings found
916[info] done compiling
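The [E198] blocks above are Scala 3's unused-symbol lints: explicit parameters, implicit parameters and imports that are declared but never referenced. Where a parameter exists only to satisfy a callback's shape, the usual fixes are renaming it to _ or marking it with scala.annotation.unused; a minimal sketch (the afterAttempt signature is copied from the warnings above, the bodies are illustrative):

    import scala.annotation.unused

    // Keep the parameters (the callback's shape requires them), but mark them
    // as intentionally unused so the -Wunused lints stay quiet:
    def afterAttempt(@unused attempt: Int, @unused result: Either[Throwable, Int]): Unit = ()

    // In lambdas, replacing the unused name with _ has the same effect:
    val groupTransform: Int => (String => String) = _ => identity // instead of v => f => f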
917[info] AfterAttemptTest:
918[info] RetryPolicy afterAttempt callback
919[info] - should retry a succeeding function with afterAttempt callback
920[info] - should retry a failing function with afterAttempt callback
921[info] SourceOpsTest:
922[info] - should pipe one source to another
923[info] - should pipe one source to another (with done propagation)
924[info] - should tap over a source
925[info] FlowOpsReduceTest:
926[info] reduce
927[info] - should throw NoSuchElementException for reduce over the empty source
928[info] - should throw exception thrown in `f` when `f` throws
929[info] - should return first element from reduce over the single element source
930[info] - should run reduce over a non-empty source
931[info] ExceptionTest:
932[info] unsupervised
933[2026-01-13T14:18:13.867513261Z] [24] CustomException
934[info] - should throw the exception thrown by a joined fork
935[info] supervised
936[2026-01-13T14:18:13.883997211Z] [24] CustomException
937[info] - should throw the exception thrown in the scope
938[2026-01-13T14:18:13.888076286Z] [24] CustomException(suppressed=ExecutionException)
939[info] - should retain the original exception for context, as suppressed
940[2026-01-13T14:18:13.895389043Z] [24] CustomException
941[info] - should throw the exception thrown by a failing fork
942[2026-01-13T14:18:14.001367266Z] [24] CustomException(suppressed=ExecutionException,InterruptedException,InterruptedException)
943[info] - should interrupt other forks when there's a failure, add suppressed interrupted exceptions
944[2026-01-13T14:18:14.105030428Z] [24] CustomException(suppressed=ExecutionException,CustomException2)
945[info] - should interrupt other forks when there's a failure, add suppressed custom exceptions
946[2026-01-13T14:18:14.108177839Z] [24] CustomException(suppressed=ExecutionException,InterruptedException)
947[info] - should not add the original exception as suppressed
948[2026-01-13T14:18:14.114765056Z] [24] CustomException(suppressed=ExecutionException,CustomException3)
949[info] - should add an exception as suppressed, even if it wraps the original exception
950[info] joinEither
951[info] - should catch the exception with which a fork ends
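The ExceptionTest output above pins down ox's supervision semantics: the first exception thrown by a fork (or by the scope body) interrupts everything else and is rethrown by supervised, with interruption and secondary failures attached as suppressed exceptions, while joinEither turns a fork's failure into a value instead. A minimal sketch, with illustrative message and timings:

    import ox.*
    import scala.concurrent.duration.*

    try
      supervised {
        val slow = fork { sleep(1.second); "never returned" }
        fork { throw new RuntimeException("boom") }.discard
        slow.join() // interrupted; the scope rethrows "boom"
      }
    catch case e: Exception => println(s"scope failed: ${e.getMessage}")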
952[info] ScheduleFallingBackRetryTest:
953[info] retry with combination of schedules
954[info] - should retry 3 times immediately and then 2 times with delay
955[info] - should retry forever
956[info] DelayedRetryTest:
957[info] Delayed retry
958[info] - should retry a function
959[info] - should retry a failing function forever
960[info] - should retry an Either
961[info] adaptive retry with delayed config
962[info] - should retry a failing function forever or until adaptive retry blocks it
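The retry suites exercise ox.resilience. A minimal sketch of a delayed retry; RetryConfig.delay follows the ox documentation, but the exact builder names are an assumption and may differ between ox versions:

    import ox.resilience.*
    import scala.concurrent.duration.*

    var attempts = 0
    def flakyCall(): Int =
      attempts += 1
      if attempts < 3 then throw new RuntimeException("boom") else 42

    // up to 3 retries, sleeping 100ms between attempts (assumed builder name)
    val result: Int = retry(RetryConfig.delay(3, 100.millis))(flakyCall())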
963[info] CircuitBreakerTest:
964[info] Circuit Breaker run operations
965[info] - should run operation when metrics are not exceeded
966[info] - should drop operation after exceeding failure threshold
967[info] - should drop operation after exceeding slow call threshold
968[info] Circuit Breaker scheduled state changes
969[info] - should switch to halfOpen after configured time
970[info] - should switch back to open after configured timeout in half open state
971[info] - should correctly transition through states when there are concurrently running operations
972[info] - should correctly calculate metrics when results come in after state change
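A sketch of the circuit-breaker usage behind these tests. Both CircuitBreakerConfig.default and runOrDrop are assumptions based on the ox docs (the "drop operation" test names suggest a drop-returning variant); verify against this version's API:

    import ox.*
    import ox.resilience.*

    def callExternalService(): String = "ok" // hypothetical operation

    supervised {
      val breaker = CircuitBreaker(CircuitBreakerConfig.default)
      // None when the breaker is open and the call was dropped
      val result: Option[String] = breaker.runOrDrop(callExternalService())
      println(result)
    }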
973[info] FlowOpsTakeWhileTest:
974[info] takeWhile
975[info] - should not take from the empty flow
976[info] - should take as long as predicate is satisfied
977[info] - should take the failed element if includeFirstFailing = true
978[info] - should work if all elements match the predicate
979[info] - should fail the source with the same exception as the initial source
980[info] - should not take if predicate fails for first or more elements
981[info] FlowOpsInterleaveAllTest:
982[info] interleaveAll
983[info] - should interleave no sources
984[info] - should interleave a single flow
985[info] - should interleave multiple flows
986[info] - should interleave multiple flows using custom segment size
987[info] - should interleave multiple flows using custom segment size and complete eagerly
988[info] AppErrorTest:
989[info] supervisedError
990[info] - should return the app error from the main body
991[info] - should return success from the main body
992[info] - should return the app error returned by a failing fork
993[info] - should return success from the main body if a fork is successful
994[info] - should interrupt other forks if one fails
995[info] ChunkTest:
996[info] Chunk
997[info] - should create empty chunks
998[info] - should create chunks from arrays
999[info] - should create chunks from IArrays
1000[info] - should create chunks from elements
1001[info] - should create empty chunks from empty arrays
1002[info] - should support random access
1003[info] - should throw IndexOutOfBoundsException for invalid indices
1004[info] - should support iteration
1005[info] - should support foreach operations
1006[info] - should concatenate two non-empty chunks efficiently
1007[info] - should handle concatenation with empty chunks
1008[info] - should support chained concatenation
1009[info] - should concatenate chunks of different types
1010[info] - should concatenate non-empty chunk with non-chunk collections
1011[info] - should concatenate empty chunk with non-chunk collections
1012[info] - should handle concatenation with empty collections
1013[info] - should support drop operations
1014[info] - should support take operations
1015[info] - should handle drop/take on concatenated chunks
1016[info] - should support map operations
1017[info] - should support filter operations
1018[info] - should support collect operations
1019[info] - should convert to arrays correctly
1020[info] - should convert concatenated chunks to arrays correctly
1021[info] - should convert byte chunks to strings
1022[info] - should convert concatenated byte chunks to strings
1023[info] - should provide access to backing arrays
1024[info] - should allow efficient processing via backing arrays
1025[info] - should handle operations on empty chunks
1026[info] - should maintain consistency between single and multi-array chunks
1027[info] - should handle large chunks efficiently
1028[info] - should support indexWhere on single chunks
1029[info] - should support indexWhere on concatenated chunks
1030[info] - should handle indexWhere on empty chunks
1031[info] - should handle indexWhere edge cases with concatenated chunks
1032[info] - should support contains and exists operations
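The Chunk suite describes ox's immutable, array-backed chunks with cheap concatenation via multiple backing arrays. A minimal sketch; Chunk.fromArray and ++ are assumptions suggested by the test names, while asStringUtf8 appears verbatim in the warnings above:

    import ox.Chunk

    val hello = Chunk.fromArray("hello, ".getBytes("UTF-8"))
    val world = Chunk.fromArray("world".getBytes("UTF-8"))
    val joined = hello ++ world          // keeps both backing arrays, no copy
    println(joined.asStringUtf8)         // hello, world
    println(joined.drop(7).asStringUtf8) // world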
1033[info] FlowOpsFoldTest:
1034[info] fold
1035[info] - should throw an exception for a failed flow
1036[info] - should throw exception thrown in `f` when `f` throws
1037[info] - should return `zero` value from fold on the empty source
1038[info] - should return fold on a non-empty source
1039[info] FlowOpsFilterTest:
1040[info] filter
1041[info] - should not filter anything from the empty flow
1042[info] - should filter out everything if no element meets 'f'
1043[info] - should not filter anything if all the elements meet 'f'
1044[info] - should filter out elements that don't meet 'f'
1045[info] FlowOpsMapUsingSinkTest:
1046[info] mapUsingSink
1047[info] - should map over a source, using emit
1048[info] - should propagate errors
1049[info] FlowOpsCollectTest:
1050[info] collect
1051[info] - should collect over a source
1052[info] FlowOpsGroupedTest:
1053[info] grouped
1054[info] - should emit grouped elements
1055[info] - should emit grouped elements and include remaining values when flow closes
1056[info] - should return failed flow when the original flow is failed
1057[info] groupedWeighted
1058[info] - should emit grouped elements with custom cost function
1059[info] - should return failed flow when cost function throws exception
1060[info] - should return failed source when the original source is failed
1061[info] groupedWithin
1062[info] - should group first batch of elements due to limit and second batch due to timeout
1063[info] - should group first batch of elements due to timeout and second batch due to limit
1064[info] - should wake up on new element and send it immediately after first batch is sent and channel goes to time-out mode
1065[info] - should send the group only once when the channel is closed
1066[info] - should return failed source when the original source is failed
1067[info] groupedWeightedWithin
1068[info] - should group elements on timeout in the first batch and consider max weight in the remaining batches
1069[info] - should return failed source when cost function throws exception
1070[info] - should return failed source when the original source is failed
1071[info] MapParTest:
1072[info] mapPar
1073[info] - should output the same type as input
1074[info] - should run computations in parallel
1075[info] - should run no more computations than the limit
1076[2026-01-13T14:18:35.046778586Z] [386] exception
1077[2026-01-13T14:18:35.049525372Z] [24] catch
1078[2026-01-13T14:18:35.349752516Z] [24] all done
1079[info] - should interrupt other computations if one fails
1080[info] RateLimiterInterfaceTest:
1081[info] RateLimiter interface
1082[info] - should drop or block operation depending on method used for fixed rate algorithm
1083[info] - should drop or block operation depending on method used for sliding window algorithm
1084[info] - should drop or block operation depending on method used for bucket algorithm
1085[info] - should drop or block operation concurrently
1086[info] UtilTest:
1087[info] discard
1088[2026-01-13T14:18:41.377349965Z] [24] in f
1089[info] - should do nothing
1090[info] tapException
1091[2026-01-13T14:18:41.378381313Z] [24] in callback: boom!
1092[2026-01-13T14:18:41.378540230Z] [24] in catch: boom!
1093[info] - should run the callback when an exception is thrown
1094[2026-01-13T14:18:41.379219019Z] [24] 42
1095[2026-01-13T14:18:41.379292406Z] [24] after
1096[info] - should not run the callback when no exception is thrown
1097[2026-01-13T14:18:41.381915419Z] [24] in catch: boom! 1
1098[info] - should suppress any additional exceptions
1099[info] pipe
1100[info] - should work
1101[info] tap
1102[2026-01-13T14:18:41.383706838Z] [24] Adding
1103[2026-01-13T14:18:41.383855526Z] [24] Got: 3
1104[info] - should work
1105[info] debug as extension
1106[info] - should work
1107[info] debug as top-level method
1108some label: 10
1109x.+(1) = 11
1110[info] - should work
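UtilTest covers ox's small syntax helpers seen in the log lines above: discard, tapException, pipe, tap and debug. A brief sketch of tap/pipe, mirroring the "Adding"/"Got: 3" output:

    import ox.*

    val result = (1 + 2)
      .tap(v => println(s"Got: $v")) // run a side effect, return the value unchanged
      .pipe(_ * 10)                  // feed the value into a function
    println(result) // 30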
1111[info] FlowOpsLastTest:
1112[info] last
1113[info] - should throw NoSuchElementException for the empty source
1114[info] - should throw ChannelClosedException.Error with exception and message that was thrown during retrieval
1115[info] - should return last element for the non-empty source
1116[info] FlowOpsFailedTest:
1117[info] failed
1118[info] - should fail on receive
1119[info] FlowOpsFlattenTest:
1120[info] flatten
1121[info] - should flatten nested flows
1122[info] WeightedHeapTest:
1123[info] WeightedHeap
1124[info] - should allow inserting elements with weights
1125[info] - should allow extracting the minimum element
1126[info] - should return None when extracting from an empty heap
1127[info] - should return the correct size after operations
1128[info] - should handle empty heaps correctly
1129[info] - should update the weight of an existing element and adjust its position
1130[info] - should throw an exception when updating the weight of a non-existent element
1131[info] - should handle multiple insertions and updates correctly
1132[info] - should handle duplicate insertions by updating the existing element's weight
1133[info] - should handle increasing the weight of an existing element
1134[info] - should maintain heap property after multiple weight increases
1135[info] - should work correctly when increasing the weight of the current minimum element
1136[info] - should handle increasing weights in a large heap
1137[info] - should maintain the heap property after multiple operations
1138[info] - should work with large numbers of elements
1139[info] - should maintain heap property with random insertions and extractions
1140[info] - should maintain heap property with random weight updates
1141[info] FlowOpsMapConcatTest:
1142[info] mapConcat
1143[info] - should unfold iterables
1144[info] - should transform elements
1145[info] - should handle empty lists
1146[info] - should propagate errors in the mapping function
1147[info] FlowOpsPipeToTest:
1148[info] - should pipe one source to another
1149[info] - should pipe one source to another (with done propagation)
1150[info] FlowOpsRecoverTest:
1151[info] Flow.recover
1152[info] - should pass through elements when upstream flow succeeds
1153[info] - should emit recovery value when upstream flow fails with handled exception
1154[info] - should not emit recovery value when downstream flow fails with handled exception
1155[info] - should propagate unhandled exceptions
1156[info] - should handle multiple exception types
1157[info] - should work with different recovery value type
1158[info] - should handle exception thrown during flow processing
1159[info] - should work with empty flow
1160[info] - should propagate exception when partial function throws
1161[info] FlowOpsMapStatefulTest:
1162[info] mapStateful
1163[info] - should zip with index
1164[info] - should calculate a running total
1165[info] - should be able to emit different values than incoming ones
1166[info] - should propagate errors in the mapping function
1167[info] - should propagate errors in the completion callback
1168[info] FlowOpsMapTest:
1169[info] map
1170[info] - should map over a source
1171[info] - should map over a source using for-syntax
1172[info] ForkTest:
1173[info] fork
1174[2026-01-13T14:18:41.588607321Z] [24] main mid
1175[2026-01-13T14:18:42.088804027Z] [435] f1 complete
1176[2026-01-13T14:18:42.589426154Z] [436] f2 complete
1177[2026-01-13T14:18:42.589840204Z] [24] result = 11
1178[info] - should run two forks concurrently
1179[2026-01-13T14:18:42.592986729Z] [438] f2 complete
1180[2026-01-13T14:18:42.593200108Z] [437] f1 complete
1181[2026-01-13T14:18:42.593408517Z] [24] result = 11
1182[info] - should allow nested forks
1183[2026-01-13T14:18:42.594499273Z] [24] main mid
1184[2026-01-13T14:18:43.095252486Z] [439] f1 complete
1185[2026-01-13T14:18:43.095563864Z] [24] result = 5
1186[2026-01-13T14:18:43.095805700Z] [440] f2 interrupted
1187[info] - should interrupt child forks when parents complete
1188[2026-01-13T14:18:43.098839454Z] [444] in fork
1189[info] - should allow starting forks within a forkCancellable body, using the outer scope
1190[2026-01-13T14:18:43.200853034Z] [447] in fork
1191[info] - should allow starting forks in outer scope, from an inner scope
1192[2026-01-13T14:18:43.202549768Z] [449] IllegalStateException
1193[info] - should not allow starting forks from a thread created not by the scope
1194[info] FlowOpsIntersperseTest:
1195[info] Flow.intersperse
1196[info] - should intersperse with inject only over an empty source
1197[info] - should intersperse with inject only over a source with one element
1198[info] - should intersperse with inject only over a source with multiple elements
1199[info] - should intersperse with start, inject and end over an empty source
1200[info] - should intersperse with start, inject and end over a source with one element
1201[info] - should intersperse with start, inject and end over a source with multiple elements
1202[info] FlowOpsScanTest:
1203[info] scan
1204[info] - should scan the empty flow
1205[info] - should scan a flow of summed Int
1206[info] - should scan a flow of multiplied Int
1207[info] - should scan a flow of concatenated String
1208[info] FlowOpsUsingSinkTest:
1209[info] usingSink
1210[info] - should send the passed elements
1211[info] FlowOpsTakeTest:
1212[info] take
1213[info] - should take from a simple flow
1214[info] - should take from an async flow
1215[info] - should take all if the flow ends sooner than the desired number of elements
1216[info] EitherTest:
1217[info] either
1218[info] - should work correctly when invoked on eithers
1219[info] - should work correctly when invoked on options
1220[info] - should work correctly when invoked on fork
1221[info] - should report a proper compilation error when used outside of either:
1222[info] - should report a proper compilation error when wrong error type is used for ok() (explicit type params)
1223[info] - should report a proper compilation error when wrong successful type is used (explicit type params)
1224[info] - should report a proper compilation error when wrong type annotation is used for ok() (error)
1225[info] - should report a proper compilation error when wrong type annotation is used (success)
1226[info] - should report a proper compilation error when wrong error type is used for fail() (explicit type params)
1227[info] - should report a proper compilation error when wrong type annotation is used for fail() (error)
1228[info] - should catch non fatal exceptions
1229[info] - should not catch fatal exceptions
1230[info] - should provide an either scope when catching non fatal exceptions
1231[info] - should report a proper compilation error when wrong error type is used for ok() in catchingNonFatal block
1232[info] - should work when combined with mapPar
1233[info] - should not allow nesting of eithers
1234[info] orThrow
1235[info] - should unwrap the value for a Right-value
1236[info] - should throw exceptions for a Left-value
1237[info] catching
1238[info] - should catch given exceptions only
1239[info] - should catch parent exceptions
1240[info] - should not catch non-given exceptions
1241[info] - should not catch fatal exceptions
1242[info] - should return successful results as Right-values
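The either tests document ox's boundary/break-style error handling: inside an either: block, .ok() unwraps a Right or short-circuits the block with the Left, and the whole block evaluates to an Either. A minimal sketch, with imports per the ox docs:

    import ox.either
    import ox.either.*

    def parse(s: String): Either[String, Int] =
      s.toIntOption.toRight(s"not a number: $s")

    val sum: Either[String, Int] = either:
      parse("1").ok() + parse("2").ok() // the first Left, if any, becomes the result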
1243[info] FlowIOOpsTest:
1244[info] asInputStream
1245[info] - should return an empty InputStream for an empty source
1246[info] - should return an InputStream for a simple source
1247[info] - should correctly track available bytes
1248[info] - should support bulk read operations with read(byte[])
1249[info] - should handle bulk read operations across multiple chunks
1250[info] - should handle bulk read with concatenated chunks (multiple backing arrays)
1251[info] - should handle read(byte[], offset, length) with various parameters
1252[info] - should handle edge cases for read(byte[], offset, length)
1253[info] - should throw appropriate exceptions for invalid read parameters
1254[info] - should maintain consistency between single-byte and bulk reads
1255[info] - should handle chunks with empty backing arrays
1256[info] - should handle flow with only empty chunks
1257[info] - should handle mixed empty and non-empty chunks in flow
1258[info] toOutputStream
1259[info] - should write a single chunk with bytes to an OutputStream
1260[info] - should write multiple chunks with bytes to an OutputStream
1261[info] - should write concatenated chunks to an OutputStream
1262[info] - should handle an empty Source
1263[info] - should close the OutputStream on write error
1264[info] - should close the OutputStream on error
1265[info] toFile
1266[info] - should open existing file and write a single chunk with bytes
1267[info] - should open existing file and write multiple chunks with bytes
1268[info] - should create file and write multiple chunks with bytes
1269[info] - should write concatenated chunks to a file
1270[info] - should use an existing file and overwrite it with a single chunk of bytes
1271[info] - should handle an empty source
1272[info] - should throw an exception on failing Source
1273[info] - should throw an exception if path is a directory
1274[info] - should throw an exception if file cannot be opened
1275[info] FlowOpsConcatPrependTest:
1276[info] concat
1277[info] - should concat other source
1278[info] prepend
1279[info] - should prepend other source
1280[info] FlowTextOpsTest:
1281[info] linesUtf8
1282[info] - should split a single chunk of bytes into lines
1283[info] - should split a single chunk of bytes into lines (multiple newlines)
1284[info] - should split a single chunk of bytes into lines (beginning with newline)
1285[info] - should split a single chunk of bytes into lines (ending with newline)
1286[info] - should split a single chunk of bytes into lines (empty array)
1287[info] - should split a multiple chunks of bytes into lines
1288[info] - should split a multiple chunks of bytes into lines (multiple newlines)
1289[info] - should split a multiple chunks of bytes into lines (multiple empty chunks)
1290[info] lines(charset)
1291zażółć
1292gęślą
1293jaźń
1294[info] - should decode lines with specified charset
1295[info] - should decode lines correctly across chunk boundaries
1296[info] decodeStringUtf8
1297[info] - should decode a simple string
1298[info] - should decode a chunked string with UTF-8 multi-byte characters
1299[info] - should handle an empty Source
1300[info] - should handle partial BOM
1301[info] - should handle a string shorter than BOM
1302[info] - should handle empty chunks
1303[info] encodeUtf8
1304[info] - should handle empty String
1305[info] - should encode a string
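These text ops decode byte flows into lines and strings (the Polish pangram above is charset test data). A minimal sketch of linesUtf8, assuming Chunk.fromArray as the way to feed bytes in:

    import ox.Chunk
    import ox.flow.Flow

    val lines: List[String] = Flow
      .fromValues(Chunk.fromArray("a\nb\nc".getBytes("UTF-8")))
      .linesUtf8
      .runToList() // List(a, b, c)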
1306[info] FlowOpsZipWithIndexTest:
1307[info] zipWithIndex
1308[info] - should not zip anything from an empty flow
1309[info] - should zip flow with index
1310[2026-01-13T14:18:43.429437717Z] [24] allocate
1311[2026-01-13T14:18:43.430359886Z] [550] release 1
1312[info] ResourceTest:
1313[info] useInScope
1314[info] - should release resources after allocation
1315[2026-01-13T14:18:43.433823435Z] [24] allocate 1
1316[2026-01-13T14:18:43.434572004Z] [24] allocate 2
1317[2026-01-13T14:18:43.435775362Z] [551] release 2
1318[2026-01-13T14:18:43.435958595Z] [551] release 1
1319[info] - should release resources in reverse order
1320[2026-01-13T14:18:43.438753741Z] [24] allocate 1
1321[2026-01-13T14:18:43.439153639Z] [24] allocate 2
1322[2026-01-13T14:18:43.440413629Z] [552] release 2
1323[2026-01-13T14:18:43.440576544Z] [552] release 1
1324[2026-01-13T14:18:43.441426548Z] [24] exception
1325[info] - should release resources when there's an exception
1326[2026-01-13T14:18:43.442551079Z] [24] allocate 1
1327[2026-01-13T14:18:43.443260919Z] [24] allocate 2
1328[2026-01-13T14:18:43.443523670Z] [553] release 2
1329[2026-01-13T14:18:43.444065593Z] [553] release 1
1330[2026-01-13T14:18:43.444545350Z] [24] exception e2
1331[info] - should release resources when there's an exception during releasing (normal result)
1332[2026-01-13T14:18:43.446109333Z] [24] allocate 1
1333[2026-01-13T14:18:43.446728471Z] [24] allocate 2
1334[2026-01-13T14:18:43.447374686Z] [554] release 2
1335[2026-01-13T14:18:43.447572254Z] [554] release 1
1336[2026-01-13T14:18:43.447821251Z] [24] exception e3
1337[info] - should release resources when there's an exception during releasing (exceptional result)
1338[2026-01-13T14:18:43.449619412Z] [24] in scope
1339[2026-01-13T14:18:43.450786066Z] [555] release
1340[2026-01-13T14:18:43.452950920Z] [24] allocate
1341[info] - should release registered resources
1342[2026-01-13T14:18:43.453041099Z] [24] in scope
1343[2026-01-13T14:18:43.453612679Z] [556] release
1344[info] - should use a resource
1345[2026-01-13T14:18:43.454426909Z] [24] allocate
1346[2026-01-13T14:18:43.454507850Z] [24] in scope
1347[2026-01-13T14:18:43.455108406Z] [557] release
1348[info] - should use a closeable resource
1349[2026-01-13T14:18:43.456092032Z] [24] allocate
1350[2026-01-13T14:18:43.456157244Z] [24] in scope
1351[2026-01-13T14:18:43.456782244Z] [558] release
1352[2026-01-13T14:18:43.457512237Z] [24] exception e2 (e1)
1353[info] - should add suppressed exception when there's an exception during releasing
1354[info] FlowOpsMapParTest:
1355[info] mapPar
1356[info] - should map over a flow with parallelism limit 1
1357[info] - should map over a flow with parallelism limit 2
1358[info] - should map over a flow with parallelism limit 3
1359[info] - should map over a flow with parallelism limit 4
1360[info] - should map over a flow with parallelism limit 5
1361[info] - should map over a flow with parallelism limit 6
1362[info] - should map over a flow with parallelism limit 7
1363[info] - should map over a flow with parallelism limit 8
1364[info] - should map over a flow with parallelism limit 9
1365[info] - should map over a flow with parallelism limit 10
1366[info] - should map over a flow with parallelism limit 10 (stress test)
1367[info] + iteration 1
1368[info] + iteration 2
1369[info] + iteration 3
1370[info] + iteration 4
1371[info] + iteration 5
1372[info] + iteration 6
1373[info] + iteration 7
1374[info] + iteration 8
1375[info] + iteration 9
1376[info] + iteration 10
1377[info] + iteration 11
1378[info] + iteration 12
1379[info] + iteration 13
1380[info] + iteration 14
1381[info] + iteration 15
1382[info] + iteration 16
1383[info] + iteration 17
1384[info] + iteration 18
1385[info] + iteration 19
1386[info] + iteration 20
1387[info] + iteration 21
1388[info] + iteration 22
1389[info] + iteration 23
1390[info] + iteration 24
1391[info] + iteration 25
1392[info] + iteration 26
1393[info] + iteration 27
1394[info] + iteration 28
1395[info] + iteration 29
1396[info] + iteration 30
1397[info] + iteration 31
1398[info] + iteration 32
1399[info] + iteration 33
1400[info] + iteration 34
1401[info] + iteration 35
1402[info] + iteration 36
1403[info] + iteration 37
1404[info] + iteration 38
1405[info] + iteration 39
1406[info] + iteration 40
1407[info] + iteration 41
1408[info] + iteration 42
1409[info] + iteration 43
1410[info] + iteration 44
1411[info] + iteration 45
1412[info] + iteration 46
1413[info] + iteration 47
1414[info] + iteration 48
1415[info] + iteration 49
1416[info] + iteration 50
1417[info] + iteration 51
1418[info] + iteration 52
1419[info] + iteration 53
1420[info] + iteration 54
1421[info] + iteration 55
1422[info] + iteration 56
1423[info] + iteration 57
1424[info] + iteration 58
1425[info] + iteration 59
1426[info] + iteration 60
1427[info] + iteration 61
1428[info] + iteration 62
1429[info] + iteration 63
1430[info] + iteration 64
1431[info] + iteration 65
1432[info] + iteration 66
1433[info] + iteration 67
1434[info] + iteration 68
1435[info] + iteration 69
1436[info] + iteration 70
1437[info] + iteration 71
1438[info] + iteration 72
1439[info] + iteration 73
1440[info] + iteration 74
1441[info] + iteration 75
1442[info] + iteration 76
1443[info] + iteration 77
1444[info] + iteration 78
1445[info] + iteration 79
1446[info] + iteration 80
1447[info] + iteration 81
1448[info] + iteration 82
1449[info] + iteration 83
1450[info] + iteration 84
1451[info] + iteration 85
1452[info] + iteration 86
1453[info] + iteration 87
1454[info] + iteration 88
1455[info] + iteration 89
1456[info] + iteration 90
1457[info] + iteration 91
1458[info] + iteration 92
1459[info] + iteration 93
1460[info] + iteration 94
1461[info] + iteration 95
1462[info] + iteration 96
1463[info] + iteration 97
1464[info] + iteration 98
1465[info] + iteration 99
1466[info] + iteration 100
1467[info] - should propagate errors
1468[2026-01-13T14:18:52.076382503Z] [1916] done
1469[2026-01-13T14:18:52.076382544Z] [1915] done
1470[2026-01-13T14:18:52.176906515Z] [1918] exception
1471[info] - should cancel other running forks when there's an error
1472[info] - should handle empty flow
1473[info] - should handle flow with exactly parallelism number of elements
1474[info] - should handle flow with less than parallelism number of elements
1475[info] - should preserve order even with varying processing times
1476[info] - should preserve order with random processing times
1477[info] - should work with very high parallelism values
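mapPar runs the mapping function on up to the given number of elements concurrently while still emitting results in input order, which is what the ordering and stress tests above assert. A minimal sketch with illustrative timings:

    import ox.*
    import ox.flow.Flow
    import scala.concurrent.duration.*
    import scala.util.Random

    val result = Flow
      .fromValues((1 to 10)*)
      .mapPar(4) { i =>
        sleep(Random.nextInt(20).millis) // varying per-element processing time
        i * 2
      }
      .runToList() // order preserved: List(2, 4, ..., 20)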
1478[info] SelectOrClosedWithinTest:
1479[info] selectOrClosedWithin
1480[info] - should select a clause that can complete immediately
1481[info] - should return timeout when no clause can complete within the timeout
1482[info] - should select a source that has a value immediately
1483[info] - should return timeout when no source has a value within the timeout
1484[info] - should work with different timeout value types
1485[info] - should handle empty clauses sequence
1486[info] - should handle empty sources sequence
1487[info] selectOrClosedWithin with single clause
1488[info] - should complete when clause is ready
1489[info] - should timeout when clause is not ready
1490[info] selectOrClosedWithin with multiple clauses
1491[info] - should select the first ready clause
1492[info] - should timeout when no clauses are ready
1493[info] selectOrClosedWithin with sources
1494[info] - should select from ready source
1495[info] - should timeout when no sources are ready
1496[info] selectOrClosedWithin error scenarios
1497[info] - should handle channel closed with done
1498[info] - should handle channel closed with error
1499[info] - should prioritize ready channels over closed ones
1500[info] selectOrClosedWithin with different timeout types
1501[info] - should work with various timeout value types
1502[info] selectOrClosedWithin with sequences
1503[info] - should handle empty sequences
1504[info] - should handle sequence of clauses
1505[info] - should handle sequence of sources
1506[info] selectOrClosedWithin with various arities
1507[info] - should work with all supported clause counts
1508[info] - should work with all supported source counts
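selectOrClosedWithin is the timeout-returning variant of ox's select, which completes the first ready clause among several channels. A minimal sketch of the underlying select, using the receiveClause/Received pattern from the ox channel docs:

    import ox.*
    import ox.channels.*

    val c1 = Channel.buffered[Int](1)
    val c2 = Channel.buffered[String](1)
    c2.send("hello")

    select(c1.receiveClause, c2.receiveClause) match
      case c1.Received(i) => println(s"int: $i")
      case c2.Received(s) => println(s"string: $s") // this branch wins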
1509[info] FlowOpsSplitTest:
1510[info] split
1511[info] - should split an empty flow
1512[info] - should split a flow with no delimiters
1513[info] - should split a flow with delimiter at the beginning
1514[info] - should split a flow with delimiter at the end
1515[info] - should split a flow with delimiter in the middle
1516[info] - should split a flow with multiple delimiters
1517[info] - should split a flow with adjacent delimiters
1518[info] - should split a flow with only delimiters
1519[info] - should split a flow with single delimiter
1520[info] - should split a flow with single non-delimiter
1521[info] - should split a flow with multiple consecutive delimiters at the beginning
1522[info] - should split a flow with multiple consecutive delimiters at the end
1523[info] - should split a flow with string delimiters
1524[info] - should split a flow using complex predicate
1525[info] - should handle error propagation
1526[info] - should split a large flow efficiently
1527[info] JitterTest:
1528[info] Jitter
1529[info] - should use no jitter
1530[info] - should use full jitter
1531[info] - should use equal jitter
1532[info] - should use decorrelated jitter
1533[info] FlowOpsAlsoToTest:
1534[info] alsoTo
1535[info] - should send to both sinks
1536[info] - should send to both sinks and not hang when other sink is rendezvous channel
1537[info] - should close main flow when other closes
1538[info] - should close main flow with error when other errors
1539[info] - should close other channel with error when main errors
1540[info] FlowOpsBufferTest:
1541[info] buffer
1542[info] - should work with a single async boundary
1543[info] - should work with multiple async boundaries
1544[info] - should propagate errors
1545[info] BackoffRetryTest:
1546[info] Backoff retry
1547[info] - should retry a function
1548[info] - should retry a failing function forever
1549[info] - should respect maximum delay
1550[info] - should use jitter
1551[info] - should retry an Either
1552[info] FlowOpsEnsureTest:
1553[info] ensure.onComplete
1554[info] - should run in case of success
1555[info] - should run in case of error
1556[info] ensure.onDone
1557[info] - should run in case of success
1558[info] - should not run in case of error
1559[info] ensure.onError
1560[info] - should not run in case of success
1561[info] - should run in case of error
1562[info] FlowOpsTakeLastTest:
1563[info] takeLast
1564[info] - should throw ChannelClosedException.Error for a source that failed without an exception
1565[info] - should fail to takeLast when n < 0
1566[info] - should return empty list for the empty source
1567[info] - should return empty list when n == 0 and list is not empty
1568[info] - should return list with all elements if the source is smaller than requested number
1569[info] - should return the last n elements from the source
1570[info] FlowOpsZipAllTest:
1571[info] zipAll
1572[info] - should not emit any element when both flows are empty
1573[info] - should emit this element when other flow is empty
1574[info] - should emit other element when this flow is empty
1575[info] - should emit matching elements when both flows are of the same size
1576[info] - should emit default for other flow if this flow is longer
1577[info] - should emit default for this flow if other flow is longer
1578[info] FlowPublisherTckTest:
1579[info] - required_createPublisher1MustProduceAStreamOfExactly1Element
1580[info] - required_createPublisher3MustProduceAStreamOfExactly3Elements
1581[info] - required_validate_maxElementsFromPublisher
1582[info] - required_validate_boundedDepthOfOnNextAndRequestRecursion
1583[info] - required_spec101_subscriptionRequestMustResultInTheCorrectNumberOfProducedElements
1584[info] - required_spec102_maySignalLessThanRequestedAndTerminateSubscription
1585[info] - stochastic_spec103_mustSignalOnMethodsSequentially
1586[info] - optional_spec104_mustSignalOnErrorWhenFails
1587[info] - required_spec105_mustSignalOnCompleteWhenFiniteStreamTerminates
1588[info] - optional_spec105_emptyStreamMustTerminateBySignallingOnComplete
1589[info] - required_spec107_mustNotEmitFurtherSignalsOnceOnCompleteHasBeenSignalled
1590[info] - untested_spec107_mustNotEmitFurtherSignalsOnceOnErrorHasBeenSignalled !!! IGNORED !!!
1591[info] - untested_spec109_subscribeShouldNotThrowNonFatalThrowable !!! IGNORED !!!
1592[info] - required_spec109_subscribeThrowNPEOnNullSubscriber
1593[info] - required_spec109_mustIssueOnSubscribeForNonNullSubscriber
1594[info] - required_spec109_mayRejectCallsToSubscribeIfPublisherIsUnableOrUnwillingToServeThemRejectionMustTriggerOnErrorAfterOnSubscribe
1595[info] - untested_spec110_rejectASubscriptionRequestIfTheSameSubscriberSubscribesTwice !!! IGNORED !!!
1596[info] - optional_spec111_maySupportMultiSubscribe
1597[info] - optional_spec111_registeredSubscribersMustReceiveOnNextOrOnCompleteSignals
1598[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingOneByOne
1599[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingManyUpfront
1600[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingManyUpfrontAndCompleteAsExpected
1601[info] - required_spec302_mustAllowSynchronousRequestCallsFromOnNextAndOnSubscribe
1602[info] - required_spec303_mustNotAllowUnboundedRecursion
1603[info] - untested_spec304_requestShouldNotPerformHeavyComputations !!! IGNORED !!!
1604[info] - untested_spec305_cancelMustNotSynchronouslyPerformHeavyComputation !!! IGNORED !!!
1605[info] - required_spec306_afterSubscriptionIsCancelledRequestMustBeNops
1606[info] - required_spec307_afterSubscriptionIsCancelledAdditionalCancelationsMustBeNops
1607[info] - required_spec309_requestZeroMustSignalIllegalArgumentException
1608[info] - required_spec309_requestNegativeNumberMustSignalIllegalArgumentException
1609[info] - required_spec312_cancelMustMakeThePublisherToEventuallyStopSignaling
1610[info] - required_spec313_cancelMustMakeThePublisherEventuallyDropAllReferencesToTheSubscriber
1611[info] - required_spec317_mustSupportAPendingElementCountUpToLongMaxValue
1612[info] - required_spec317_mustSupportACumulativePendingElementCountUpToLongMaxValue
1613[info] - required_spec317_mustNotSignalOnErrorWhenPendingAboveLongMaxValue
1614[info] - optional_spec309_requestNegativeNumberMaySignalIllegalArgumentExceptionWithSpecificMessage
1615[info] - untested_spec108_possiblyCanceledSubscriptionShouldNotReceiveOnErrorOrOnCompleteSignals !!! IGNORED !!!
1616[info] - untested_spec106_mustConsiderSubscriptionCancelledAfterOnErrorOrOnCompleteHasBeenCalled !!! IGNORED !!!
1617[info] SourceOpsFactoryMethodsTest:
1618[info] Source factory methods
1619[info] - should create a source from a fork
1620[info] CancelTest:
1621[info] cancel
1622[2026-01-13T14:19:00.451281360Z] [2410] started
1623[2026-01-13T14:19:00.551624879Z] [2410] interrupted
1624[2026-01-13T14:19:01.052123343Z] [2410] interrupted done
1625[2026-01-13T14:19:01.052514327Z] [2408] cancel done
1626[info] - should block until the fork completes
1627[2026-01-13T14:19:02.055311693Z] [2411] cancel done
1628[2026-01-13T14:19:02.157250567Z] [2416] interrupted
1629[2026-01-13T14:19:02.257630553Z] [2416] interrupted done
1630[2026-01-13T14:19:02.257920329Z] [2414] cancel done
1631[2026-01-13T14:19:02.358566189Z] [2417] cancel done
1632[2026-01-13T14:19:02.460523867Z] [2422] interrupted
1633[2026-01-13T14:19:02.560908181Z] [2422] interrupted done
1634[2026-01-13T14:19:02.561144200Z] [2420] cancel done
1635[2026-01-13T14:19:02.661775173Z] [2423] cancel done
1636[2026-01-13T14:19:02.763736139Z] [2428] interrupted
1637[2026-01-13T14:19:02.864147364Z] [2428] interrupted done
1638[2026-01-13T14:19:02.864397985Z] [2426] cancel done
1639[2026-01-13T14:19:02.965056068Z] [2429] cancel done
1640[2026-01-13T14:19:03.067000997Z] [2434] interrupted
1641[2026-01-13T14:19:03.167389811Z] [2434] interrupted done
1642[2026-01-13T14:19:03.167635623Z] [2432] cancel done
1643[2026-01-13T14:19:03.268290801Z] [2435] cancel done
1644[2026-01-13T14:19:03.370177988Z] [2440] interrupted
1645[2026-01-13T14:19:03.470546894Z] [2440] interrupted done
1646[2026-01-13T14:19:03.470805452Z] [2438] cancel done
1647[2026-01-13T14:19:03.571442328Z] [2441] cancel done
1648[2026-01-13T14:19:03.673272796Z] [2446] interrupted
1649[2026-01-13T14:19:03.773628998Z] [2446] interrupted done
1650[2026-01-13T14:19:03.773883930Z] [2444] cancel done
1651[2026-01-13T14:19:03.874541002Z] [2447] cancel done
1652[2026-01-13T14:19:03.976436727Z] [2452] interrupted
1653[2026-01-13T14:19:04.076829754Z] [2452] interrupted done
1654[2026-01-13T14:19:04.077076993Z] [2450] cancel done
1655[2026-01-13T14:19:04.177691179Z] [2453] cancel done
1656[2026-01-13T14:19:04.279598643Z] [2458] interrupted
1657[2026-01-13T14:19:04.379965581Z] [2458] interrupted done
1658[2026-01-13T14:19:04.380187917Z] [2456] cancel done
1659[2026-01-13T14:19:04.480810632Z] [2459] cancel done
1660[2026-01-13T14:19:04.582755146Z] [2464] interrupted
1661[2026-01-13T14:19:04.683144395Z] [2464] interrupted done
1662[2026-01-13T14:19:04.683403961Z] [2462] cancel done
1663[2026-01-13T14:19:04.784035287Z] [2465] cancel done
1664[2026-01-13T14:19:04.885783541Z] [2470] interrupted
1665[2026-01-13T14:19:04.986162314Z] [2470] interrupted done
1666[2026-01-13T14:19:04.986447780Z] [2468] cancel done
1667[info] - should block until the fork completes (stress test)
1668[info] + iteration 1
1669[info] + iteration 2
1670[info] + iteration 3
1671[info] + iteration 4
1672[info] + iteration 5
1673[info] + iteration 6
1674[info] + iteration 7
1675[info] + iteration 8
1676[info] + iteration 9
1677[info] + iteration 10
1678[info] + iteration 11
1679[info] + iteration 12
1680[info] + iteration 13
1681[info] + iteration 14
1682[info] + iteration 15
1683[info] + iteration 16
1684[info] + iteration 17
1685[info] + iteration 18
1686[info] + iteration 19
1687[info] + iteration 20
1688[info] cancelNow
1689[2026-01-13T14:19:05.189724133Z] [2471] cancel done
1690[2026-01-13T14:19:05.690069895Z] [2473] interrupted done
1691[info] - should return immediately, and wait for forks when scope completes
1692[info] - should (when followed by a joinEither) catch InterruptedException with which a fork ends
1693[info] FlowOpsTapTest:
1694[info] - should tap over a flow
1695[info] FlowOpsAlsoToTapTest:
1696[info] alsoToTap
1697[info] - should send to both sinks when other is faster
1698[info] - should send to both sinks when other is slower
1699[info] - should not fail the flow when the other sink fails
1700[info] - should not close the flow when the other sink closes
1701[info] SourceOpsFailedTest:
1702[info] Source.failed
1703[info] - should fail on receive
1704[info] - should be in error
1705[info] FlowOpsDebounceTest:
1706[info] debounce
1707[info] - should not debounce if applied on an empty flow
1708[info] - should not debounce if applied on a flow containing only distinct values
1709[info] - should debounce if applied on a flow containing only repeating values
1710[info] - should debounce if applied on a flow containing repeating elements
1711[info] FlowOpsThrottleTest:
1712[info] throttle
1713[info] - should not throttle the empty source
1714[info] - should throttle to specified elements per time units
1715[info] - should fail to throttle when elements <= 0
1716[info] - should fail to throttle when per is lower than 1ms
1717[info] FlowOpsRunToChannelTest:
1718[info] runToChannel
1719[info] - should receive the elements in the flow
1720[info] - should return the original source when running a source-backed flow
1721[info] FlowOpsTimeoutTest:
1722[info] - should timeout
1723[info] FlowOpsZipTest:
1724[info] - should zip two sources
1725[info] FixedRateRepeatTest:
1726[info] repeat
1727[info] - should repeat a function at fixed rate
1728[info] - should repeat a function at fixed rate with initial delay
1729[info] - should repeat a function forever at fixed rate
1730[info] - should repeat a function forever at fixed rate with initial delay
1731[info] ForeachParTest:
1732[info] foreachPar
1733[2026-01-13T14:19:07.827688194Z] [2505] 3
1734[2026-01-13T14:19:07.827605837Z] [2503] 1
1735[2026-01-13T14:19:07.827604351Z] [2502] 0
1736[2026-01-13T14:19:07.827631008Z] [2504] 2
1737[2026-01-13T14:19:07.827744837Z] [2506] 4
1738[2026-01-13T14:19:07.928162472Z] [2507] 5
1739[2026-01-13T14:19:07.928181921Z] [2508] 6
1740[2026-01-13T14:19:07.928231118Z] [2509] 7
1741[2026-01-13T14:19:07.928252164Z] [2510] 8
1742[2026-01-13T14:19:07.928286728Z] [2511] 9
1743[2026-01-13T14:19:08.028762814Z] [2514] 12
1744[2026-01-13T14:19:08.028955820Z] [2516] 14
1745[2026-01-13T14:19:08.028732753Z] [2513] 11
1746[2026-01-13T14:19:08.028727497Z] [2512] 10
1747[2026-01-13T14:19:08.028947989Z] [2515] 13
1748[2026-01-13T14:19:08.129424446Z] [2517] 15
1749[2026-01-13T14:19:08.129443798Z] [2518] 16
1750[2026-01-13T14:19:08.129468247Z] [2519] 17
1751[info] - should run computations in parallel
1752[info] - should run no more computations than the limit
1753[2026-01-13T14:19:08.467039175Z] [2684] exception
1754[2026-01-13T14:19:08.467757659Z] [24] catch
1755[2026-01-13T14:19:08.767940826Z] [24] all done
1756[info] - should interrupt other computations if one fails
1757[info] FlowOpsFutureSourceTest:
1758[info] futureSource
1759[info] - should return the original future failure when future fails
1760[info] - should return future's source values
1761[info] SourceOpsTransformTest:
1762[info] Source.transform
1763[info] - should transform a source using a simple map
1764[info] - should transform a source using a complex chain of operations
1765[info] - should transform an infinite source
1766[info] - should transform an infinite source (stress test)
1767[info] RateLimiterTest:
1768[info] fixed rate RateLimiter
1769[info] - should drop operation when rate limit is exceeded
1770[info] - should restart rate limiter after given duration
1771[info] - should block operation when rate limit is exceeded
1772[info] - should respect time constraints when blocking
1773[info] - should respect time constraints when blocking concurrently
1774[info] - should allow running more long-running operations concurrently than the max rate when not considering operation time
1775[info] - should not allow running more long-running operations concurrently than the max rate when considering operation time
1776[info] sliding window RateLimiter
1777[info] - should drop operation when rate limit is exceeded
1778[info] - should restart rate limiter after given duration
1779[info] - should block operation when rate limit is exceeded
1780[info] - should respect time constraints when blocking
1781[info] - should respect time constraints when blocking concurrently
1782[info] - should not allow running more operations while previous operations are still running, when considering operation time
1783[info] - should not allow running more operations while operations are still running within the window span, when considering operation time
1784[info] bucket RateLimiter
1785[info] - should drop operation when rate limit is exceeded
1786[info] - should refill token after time elapsed from last refill and not before
1787[info] - should block operation when rate limit is exceeded
1788[info] - should respect time constraints when blocking
1789[info] - should respect time constraints when blocking concurrently
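A sketch of the rate-limiter API these tests drive. RateLimiter.fixedRate and the runBlocking/runOrDrop pair are assumptions based on the ox docs and the "drop or block" test names; check this version's API before relying on them:

    import ox.*
    import ox.resilience.*
    import scala.concurrent.duration.*

    supervised {
      val limiter = RateLimiter.fixedRate(2, 1.second) // assumed: 2 operations per second
      limiter.runBlocking(println("waits until a slot is free"))
      val ran: Option[Unit] = limiter.runOrDrop(println("runs only if within the rate"))
      println(ran.isDefined)
    }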
1790[info] FlowOpsSplitOnTest:
1791[info] splitOn
1792[info] - should split an empty flow
1793[info] - should split a flow with no delimiters
1794[info] - should split a flow with single-element delimiter at the beginning
1795[info] - should split a flow with single-element delimiter at the end
1796[info] - should split a flow with single-element delimiter in the middle
1797[info] - should split a flow with multiple single-element delimiters
1798[info] - should split a flow with adjacent single-element delimiters
1799[info] - should split a flow with only single-element delimiters
1800[info] - should split a flow with multi-element delimiter at the beginning
1801[info] - should split a flow with multi-element delimiter at the end
1802[info] - should split a flow with multi-element delimiter in the middle
1803[info] - should split a flow with multiple multi-element delimiters
1804[info] - should split a flow with adjacent multi-element delimiters
1805[info] - should split a flow with only multi-element delimiters
1806[info] - should split a flow with overlapping patterns
1807[info] - should split a flow with complex overlapping patterns
1808[info] - should handle empty delimiter by returning entire input as single chunk
1809[info] - should handle empty delimiter with empty input
1810[info] - should split a flow with string elements
1811[info] - should split a flow with multi-element string delimiter
1812[info] - should handle delimiter longer than input
1813[info] - should handle single element matching start of multi-element delimiter
1814[info] - should handle partial delimiter match at end
1815[info] - should split with delimiter that appears multiple times in sequence
1816[info] - should handle error propagation
1817[info] - should split a large flow efficiently
1818[info] - should handle repeated delimiter pattern correctly
1819[info] - should properly split when given a flow with delimiter patterns
1820[info] - should handle erroneous scenarios when delimiter processing fails
1821[info] FlowOpsMergeTest:
1822[info] merge
1823[info] - should merge two simple flows
1824[info] - should merge two async flows
1825[info] - should merge with a tick flow
1826[info] - should propagate error from the left
1827[info] - should propagate error from the right
1828[info] - should merge two flows, emitting all elements from the left when right completes
1829[info] - should merge two flows, emitting all elements from the right when left completes
1830[info] - should merge two flows, completing the resulting flow when the left flow completes
1831[info] - should merge two flows, completing the resulting flow when the right flow completes
1832[info] FlowOpsFlatMapTest:
1833[info] flatMap
1834[info] - should flatten simple flows
1835[info] - should propagate errors
1836[info] FlowCompanionIOOpsTest:
1837[info] fromInputStream
1838[info] - should handle an empty InputStream
1839[info] - should handle InputStream shorter than buffer size
1840[info] - should handle InputStream longer than buffer size
1841[info] - should close the InputStream after reading it
1842[info] - should close the InputStream after failing with an exception
1843[info] fromFile
1844[info] - should read content from a file smaller than chunk size
1845[info] - should read content from a file larger than chunk size
1846[info] - should handle an empty file
1847[info] - should throw an exception for missing file
1848[info] - should throw an exception if path is a directory
1849[info] CollectParTest:
1850[info] collectPar
1851[info] - should output the same type as input
1852[info] - should run computations in parallel
1853[info] - should run no more computations than the limit
1854[2026-01-13T14:19:53.136184516Z] [5995] exception
1855[2026-01-13T14:19:53.136699391Z] [24] catch
1856[2026-01-13T14:19:53.436894679Z] [24] all done
1857[info] - should interrupt other computations if one fails
1858[info] FlowOpsFutureTest:
1859[info] future
1860[info] - should return the original future failure when future fails
1861[info] - should return future value
1862[info] FlowOpsInterleaveTest:
1863[info] interleave
1864[info] - should interleave with an empty source
1865[info] - should interleave two sources with default segment size
1866[info] - should interleave two sources with default segment size and different lengths
1867[info] - should interleave two sources with custom segment size
1868[info] - should interleave two sources with custom segment size and different lengths
1869[info] - should interleave two sources with different lengths and complete eagerly
1870[info] - should, when empty, interleave with a non-empty source and complete eagerly
1871[info] - should interleave with an empty source and complete eagerly
1872[info] ParTest:
1873[info] par
1874[2026-01-13T14:19:53.557880299Z] [6025] b
1875[2026-01-13T14:19:53.657885960Z] [6024] a
1876[2026-01-13T14:19:53.658217034Z] [24] done
1877[info] - should run computations in parallel
1878[2026-01-13T14:19:53.760271853Z] [6028] exception
1879[2026-01-13T14:19:53.760917725Z] [24] catch
1880[2026-01-13T14:19:54.061142307Z] [24] all done
1881[info] - should interrupt other computations if one fails
1882[info] parLimit
1883[info] - should run up to the given number of computations in parallel
1884[2026-01-13T14:19:54.767505029Z] [6040] x
1885[2026-01-13T14:19:54.767505002Z] [6041] x
1886[2026-01-13T14:19:54.777874162Z] [6043] exception
1887[2026-01-13T14:19:54.778305498Z] [24] catch
1888[2026-01-13T14:19:55.078501667Z] [24] all done
1889[info] - should interrupt other computations if one fails
1890[info] parEither
1891[2026-01-13T14:19:55.180383248Z] [6047] b
1892[2026-01-13T14:19:55.280384863Z] [6046] a
1893[2026-01-13T14:19:55.281215804Z] [24] done
1894[info] - should run computations in parallel
1895[2026-01-13T14:19:55.382614032Z] [6050] exception
1896[2026-01-13T14:19:55.683314923Z] [24] all done
1897[info] - should interrupt other computations if one fails
1898[info] SelectWithinTest:
1899[info] selectWithin
1900[info] - should select a clause that can complete immediately
1901[info] - should throw TimeoutException when no clause can complete within the timeout
1902[info] - should select a source that has a value immediately
1903[info] - should throw TimeoutException when no source has a value within the timeout
1904[info] - should work with single clause
1905[info] - should work with three clauses
1906[info] - should work with four clauses
1907[info] - should work with five clauses
1908[info] - should work with sequence of clauses
1909[info] selectWithin with sources
1910[info] - should work with single source
1911[info] - should work with two sources
1912[info] - should work with three sources
1913[info] - should work with four sources
1914[info] - should work with five sources
1915[info] - should work with sequence of sources
1916[info] selectWithin timeout scenarios
1917[info] - should throw TimeoutException for single clause timeout
1918[info] - should throw TimeoutException for single source timeout
1919[info] - should throw TimeoutException for sequence of clauses timeout
1920[info] - should throw TimeoutException for sequence of sources timeout
1921[info] - should throw TimeoutException immediately for empty sequence of clauses
1922[info] - should throw TimeoutException immediately for empty sequence of sources
1923[info] selectWithin error scenarios
1924[info] - should throw ChannelClosedException when channel is closed with done
1925[info] - should throw ChannelClosedException when channel is closed with error
1926[info] - should prioritize ready channels over closed ones
1927[info] selectWithin performance
1928[info] - should not timeout when clause can complete immediately
1929[info] - should respect timeout duration
1930[info] selectWithin with send clauses
1931[info] - should work with send clauses
1932[info] - should throw TimeoutException when send clauses cannot complete
1933[info] ImmediateRepeatTest:
1934[info] repeat
1935[info] - should repeat a function immediately
1936[info] - should repeat a function immediately with initial delay
1937[info] - should repeat a function immediately forever
1938[info] - should repeat a function immediately forever with initial delay
1939[info] FlowOpsDebounceByTest:
1940[info] debounceBy
1941[info] - should not debounce if applied on an empty flow
1942[info] - should not debounce if applied on a flow containing only distinct f(value)
1943[info] - should debounce if applied on a flow containing repeating f(value)
1944[info] - should debounce subsequent odd/prime numbers
1945[info] RaceTest:
1946[info] timeout
1947[2026-01-13T14:19:57.062698441Z] [24] timeout
1948[2026-01-13T14:19:57.062838993Z] [24] done
1949[info] - should short-circuit a long computation
1950[2026-01-13T14:19:59.564257915Z] [6112] no timeout
1951[2026-01-13T14:19:59.564623520Z] [24] done
1952[info] - should not interrupt a short computation
1953[info] timeoutOption
1954[2026-01-13T14:20:02.566634503Z] [24] done: None
1955[info] - should short-circuit a long computation
1956[info] race
1957[2026-01-13T14:20:05.068608409Z] [6117] fast
1958[info] - should race a slower and faster computation
1959[2026-01-13T14:20:06.571167552Z] [6118] fast
1960[info] - should race a faster and slower computation
1961[2026-01-13T14:20:07.773253163Z] [6120] error
1962[2026-01-13T14:20:08.073237389Z] [6121] slow
1963[info] - should return the first successful computation to complete
1964[info] - should add other exceptions as suppressed
1965[info] - should treat ControlThrowable as a non-fatal exception
1966[info] - should immediately rethrow other fatal exceptions
1967[info] raceEither
1968[2026-01-13T14:20:09.680328990Z] [6131] error
1969[2026-01-13T14:20:09.980360955Z] [6132] slow
1970[info] - should return the first successful computation to complete
1971[info] raceResult
1972[info] - should immediately return when a normal exception occurs
1973[info] - should immediately return when a control exception occurs
1974[info] - should immediately return when a fatal exception occurs
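The race/timeout suites pin down ox's direct-style racing: timeout interrupts a computation that misses its deadline and throws a TimeoutException, timeoutOption returns None instead, and race returns the first successful result while interrupting the loser. A minimal sketch:

    import ox.*
    import scala.concurrent.duration.*

    val slow: Option[String] =
      timeoutOption(100.millis) { sleep(1.second); "too slow" } // None

    val winner: String =
      race({ sleep(200.millis); "slow" }, { sleep(50.millis); "fast" }) // "fast"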
1975[info] SourceOpsFutureSourceTest:
1976[info] SourceOps.futureSource
1977[info] - should return the original future failure when future fails
1978[info] - should return the original future failure when future fails with ExecutionException
1979[info] - should return future's source values
1980[info] FilterParTest:
1981[info] filterPar
1982[info] - should output the same type as input
1983[info] - should run computations in parallel
1984[info] - should run no more computations than the limit
1985[2026-01-13T14:20:11.729289861Z] [6337] exception
1986[2026-01-13T14:20:11.729797342Z] [24] catch
1987[2026-01-13T14:20:12.029988406Z] [24] all done
1988[info] - should interrupt other computations if one fails
1989[info] SourceOpsForeachTest:
1990[info] Source.foreach
1991[info] - should iterate over a source
1992[info] - should iterate over a source using for-syntax
1993[info] - should convert source to a list
1994[info] FlowOpsEmptyTest:
1995[info] empty
1996[info] - should be empty
1997[info] SupervisedTest:
1998[info] supervised
1999[2026-01-13T14:20:12.138887413Z] [6343] b
2000[2026-01-13T14:20:12.238797735Z] [6342] a
2001[2026-01-13T14:20:12.239091525Z] [24] done
2002[info] - should wait until all forks complete
2003[2026-01-13T14:20:12.340766193Z] [6346] b
2004[2026-01-13T14:20:12.341201393Z] [24] done
2005[info] - should only wait until user forks complete
2006[2026-01-13T14:20:12.443230369Z] [6350] b
2007[2026-01-13T14:20:12.543760586Z] [24] done
2008[info] - should interrupt once any fork ends with an exception
2009[2026-01-13T14:20:12.745888889Z] [24] done
2010[info] - should interrupt main body once a fork ends with an exception
2011[2026-01-13T14:20:12.847606546Z] [6356] b
2012[2026-01-13T14:20:13.047379113Z] [6354] a
2013[2026-01-13T14:20:13.047648912Z] [24] done
2014[info] - should not interrupt if an unsupervised fork ends with an exception
2015[info] - should handle interruption of multiple forks with `joinEither` correctly
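The supervised-scope guarantees checked above ("wait until all forks complete", "interrupt once any fork ends with an exception") look like this in user code, sketched with ox's documented supervised/fork:

    import ox.*

    @main def supervisedDemo(): Unit =
      supervised {
        val a = fork { Thread.sleep(200); "a" }
        val b = fork { Thread.sleep(100); "b" }
        // the scope completes only when all forks do; a failure in one
        // interrupts the others, matching the tests above
        println(a.join() + b.join()) // ab
      }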
2016[info] SourceOpsFutureTest:
2017[info] Source.future
2018[info] - should return the original future failure when future fails
2019[info] - should return the original future failure when future fails with ExecutionException
2020[info] - should return future value
2021[info] CircuitBreakerStateMachineTest:
2022[info] Circuit Breaker state machine
2023[info] - should keep closed with healthy metrics
2024[info] - should go to open after surpassing failure threshold
2025[info] - should go straight to half open after surpassing failure threshold with defined waitDurationOpenState = 0
2026[info] - should go back to open after timeout in half open passed
2027[info] - should update counter of completed operations in halfOpen state
2028[info] - should go back to closed after enough calls with good metrics are recorded
2029[info] - should go to open after enough calls with bad metrics are recorded in halfOpen state
2030[info] - should go to closed after enough calls with good metrics are recorded in halfOpen state
2031[info] - should go to half open after waitDurationOpenState passes
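The transitions listed above (closed to open on a failure threshold, open to half-open after waitDurationOpenState, half-open back to closed or open depending on recorded metrics) reduce to a small state machine. The following is an illustrative toy model of those transitions only, not ox's actual CircuitBreaker API:

    // toy model of the tested transitions; not ox's CircuitBreaker types
    enum BreakerState:
      case Closed, Open, HalfOpen

    def next(s: BreakerState, thresholdExceeded: Boolean, waitElapsed: Boolean): BreakerState =
      s match
        case BreakerState.Closed   => if thresholdExceeded then BreakerState.Open else BreakerState.Closed
        case BreakerState.Open     => if waitElapsed then BreakerState.HalfOpen else BreakerState.Open
        case BreakerState.HalfOpen => if thresholdExceeded then BreakerState.Open else BreakerState.Closed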
2032[info] OxAppTest:
2033[info] OxApp
2034[info] - should work in happy case
2035[info] OxApp
2036Clean shutdown timed out after 100 milliseconds, exiting.
2037[info] - should shutdown despite cleanup taking a long time
2038[info] OxApp
2039[info] - should work in interrupted case
2040[info] OxApp
2041[info] - should work in failed case
2042[info] OxApp
2043[info] - should report any non-interrupted exceptions that occur during shutdown
2044[info] OxApp.Simple
2045[info] - should work in happy case
2046[info] OxApp.Simple
2047[info] - should work in interrupted case
2048[info] OxApp.Simple
2049[info] - should work in failed case
2050[info] OxApp.WithErrors
2051[info] - should work in happy case
2052[info] OxApp.WithErrors
2053[info] - should work in interrupted case
2054[info] OxApp.WithErrors
2055[info] - should work in failed case
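For orientation, a minimal OxApp as in ox's docs (the run signature is the documented one; HappyApp is a made-up name):

    import ox.*

    object HappyApp extends OxApp:
      def run(args: Vector[String])(using Ox): ExitCode =
        println("running") // forks started here are cleaned up on shutdown
        ExitCode.Success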
2056[info] FlowOpsMapStatefulConcatTest:
2057[info] mapStatefulConcat
2058[info] - should deduplicate
2059[info] - should count consecutive
2060[info] - should propagate errors in the mapping function
2061[info] - should propagate errors in the completion callback
2062[info] FlowOpsDropTest:
2063[info] drop
2064[info] - should not drop from the empty flow
2065[info] - should drop elements from the source
2066[info] - should return an empty source when more elements than the source length were dropped
2067[info] - should not drop when 'n == 0'
2068[info] FlowOpsRepeatEvalTest:
2069[info] repeatEval
2070[info] - should evaluate the element before each send
2071[info] - should evaluate the element before each send, as long as it's defined
2072[info] FlowPublisherPekkoTest:
2073[INFO] [01/13/2026 15:20:13.857] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2074[info] - a simple flow should emit elements to be processed by a pekko stream
2075[INFO] [01/13/2026 15:20:14.104] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2076[info] - a concurrent flow should emit elements to be processed by a pekko stream
2077[INFO] [01/13/2026 15:20:14.145] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2078[info] - create a flow from a simple publisher
2079[INFO] [01/13/2026 15:20:14.435] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2080[info] - create a flow from a concurrent publisher
2081[info] ImmediateRetryTest:
2082[info] Immediate retry
2083[info] - should retry a succeeding function
2084[info] - should fail fast when a function is not worth retrying
2085[info] - should retry a succeeding function with a custom success condition
2086[info] - should retry a failing function
2087[info] - should retry a failing function forever
2088[info] - should retry a succeeding Either
2089[info] - should fail fast when an Either is not worth retrying
2090[info] - should retry a succeeding Either with a custom success condition
2091[info] - should retry a failing Either
2092[info] Adaptive retry with immediate config
2093[info] - should retry a failing adaptive
2094[info] - should stop retrying after emptying bucket
2095[info] - should not pay exceptionCost if result T is going to be retried and shouldPayPenaltyCost returns false
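"Immediate retry" here means re-running the operation with no delay between attempts, up to a bound. A plain-Scala sketch of that idea; ox's actual retry configuration types are not shown in this log, so this deliberately avoids them:

    import scala.util.{Failure, Success, Try}

    // hypothetical helper, not ox's retry API: re-run op at once, up to maxRetries times
    def retryImmediately[T](maxRetries: Int)(op: => T): T =
      Try(op) match
        case Success(v)                   => v
        case Failure(_) if maxRetries > 0 => retryImmediately(maxRetries - 1)(op)
        case Failure(e)                   => throw e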
2096[info] FlowOpsGroupByTest:
2097[info] groupBy
2098[info] - should handle empty flow
2099[info] - should handle single-element flow
2100[info] - should handle single-element flow (stress test)
2101[info] - should create simple groups without reaching parallelism limit
2102[info] - should complete groups when the parallelism limit is reached
2103[info] - should not exceed the parallelism limit, completing earliest-active child flows as done when necessary
2104[info] - should handle large flows
2105[info] - should handle non-integer grouping keys
2106[info] - should group when child processing is slow
2107[info] - should propagate errors from child flows
2108[info] - should propagate errors from child flows when the parent is blocked on sending
2109[info] - should propagate RuntimeException errors from parent flows
2110[info] - should throw an IllegalStateException when a child stream is completed by a user-provided transformation
2111[info] FlowOpsLastOptionTest:
2112[info] lastOption
2113[info] - should return None for the empty flow
2114[info] - should return Some for a non-empty flow
2115[info] - should throw ChannelClosedException.Error with exception and message that was thrown during retrieval
2116[info] FlowOpsMapParUnorderedTest:
2117[info] mapParUnordered
2118[info] - should map over a source with parallelism limit 1
2119[info] - should map over a source with parallelism limit 2
2120[info] - should map over a source with parallelism limit 3
2121[info] - should map over a source with parallelism limit 4
2122[info] - should map over a source with parallelism limit 5
2123[info] - should map over a source with parallelism limit 6
2124[info] - should map over a source with parallelism limit 7
2125[info] - should map over a source with parallelism limit 8
2126[info] - should map over a source with parallelism limit 9
2127[info] - should map over a source with parallelism limit 10
2128[info] - should map over a source with parallelism limit 10 (stress test)
2129[info] + iteration 1
[info] + (iterations 2-99 elided)
2228[info] + iteration 100
2229[info] - should propagate errors
2230[2026-01-13T14:20:30.267704494Z] [208017] done
2231[2026-01-13T14:20:30.267704487Z] [208018] done
2232[2026-01-13T14:20:30.368239893Z] [208020] exception
2233[info] - should complete running forks and not start new ones when the mapping function fails
2234[2026-01-13T14:20:30.671489816Z] [208025] 2
2235[2026-01-13T14:20:30.671489831Z] [208024] 1
2236[info] - should complete running forks and not start new ones when the upstream fails
2237[2026-01-13T14:20:31.085565725Z] [208031] done
2238[2026-01-13T14:20:31.085573867Z] [208032] done
2239[2026-01-13T14:20:31.186285134Z] [208034] exception
2240[info] - should cancel running forks when the surrounding scope closes due to an error
2241[info] - should emit downstream as soon as a value is ready, regardless of the incoming order
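mapParUnordered maps with a parallelism bound and emits results as they complete, regardless of input order, which is what the last test above checks. A sketch against ox's Flow API (the mapParUnordered signature is inferred from the test names):

    import ox.flow.Flow

    @main def mapParUnorderedDemo(): Unit =
      val results = Flow.fromValues(300, 200, 100)
        .mapParUnordered(2)(n => { Thread.sleep(n); n })
        .runToList()
      println(results) // completion order, not input order, e.g. List(200, 300, 100)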
2242[info] ControlTest:
2243[info] timeout
2244[2026-01-13T14:20:32.693762436Z] [24] timeout
2245[2026-01-13T14:20:32.693907777Z] [24] done
2246[info] - should short-circuit a long computation
2247[info] - should pass through the exception of failed computation
2248[2026-01-13T14:20:34.796529356Z] [208048] no timeout
2249[2026-01-13T14:20:34.796899154Z] [24] done
2250[info] - should not interrupt a short computation
2251[2026-01-13T14:20:37.198474748Z] [208050] done
2252[info] - should block a thread indefinitely
2253[info] timeoutOption
2254[info] - should pass through the exception of failed computation
2255[info] timeoutEither
2256[info] - should pass through the exception of failed computation
2257[info] FlowOpsConcatTest:
2258[info] - should concatenate flows
2259[info] - should concatenate flows using ++
2260[info] - should not evaluate subsequent flows if there's a failure
2261[info] FlowOpsFactoryMethodsTest:
2262[info] factory methods
2263[info] - should create a flow from a fork
2264[info] - should create an iterating flow
2265[info] - should unfold a function
2266[info] - should produce a range
2267[info] ChannelTest:
2268[info] channel with capacity 0
2269[info] - should send and receive two spaced elements
2270[info] - should send and receive many elements, with concurrent senders & receivers
2271[info] - should select from two receives, if the last one has elements
2272[info] - should select from three receives, if the last one has elements
2273[info] - should select a receive from multiple channels
2274[info] - should select a receive until all channels are done
2275[info] - should properly report channel state
2276[info] - should select from a non-done channel, if a value is immediately available
2277[info] - should select a done channel, when the channel is done immediately
2278[info] - should select a done channel, when the channel becomes done
2279[info] channel with capacity 1
2280[info] - should send and receive two spaced elements
2281[info] - should send and receive many elements, with concurrent senders & receivers
2282[info] - should select from two receives, if the last one has elements
2283[info] - should select from three receives, if the last one has elements
2284[info] - should select a receive from multiple channels
2285[info] - should select a receive until all channels are done
2286[info] - should properly report channel state
2287[info] - should select from a non-done channel, if a value is immediately available
2288[info] - should select a done channel, when the channel is done immediately
2289[info] - should select a done channel, when the channel becomes done
2290[info] channel with capacity 2
2291[info] - should send and receive two spaced elements
2292[info] - should send and receive many elements, with concurrent senders & receivers
2293[info] - should select from two receives, if the last one has elements
2294[info] - should select from three receives, if the last one has elements
2295[info] - should select a receive from multiple channels
2296[info] - should select a receive until all channels are done
2297[info] - should properly report channel state
2298[info] - should select from a non-done channel, if a value is immediately available
2299[info] - should select a done channel, when the channel is done immediately
2300[info] - should select a done channel, when the channel becomes done
2301[info] channel with capacity 100
2302[info] - should send and receive two spaced elements
2303[info] - should send and receive many elements, with concurrent senders & receivers
2304[info] - should select from two receives, if the last one has elements
2305[info] - should select from three receives, if the last one has elements
2306[info] - should select a receive from multiple channels
2307[info] - should select a receive until all channels are done
2308[info] - should properly report channel state
2309[info] - should select from a non-done channel, if a value is immediately available
2310[info] - should select a done channel, when the channel is done immediately
2311[info] - should select a done channel, when the channel becomes done
2312[info] channel with capacity 10000
2313[info] - should send and receive two spaced elements
2314[info] - should send and receive many elements, with concurrent senders & receivers
2315[info] - should select from two receives, if the last one has elements
2316[info] - should select from three receives, if the last one has elements
2317[info] - should select a receive from multiple channels
2318[info] - should select a receive until all channels are done
2319[info] - should properly report channel state
2320[info] - should select from a non-done channel, if a value is immediately available
2321[info] - should select a done channel, when the channel is done immediately
2322[info] - should select a done channel, when the channel becomes done
2323[info] buffered channel
2324[info] - should select a send when one is available
2325[info] channel
2326[info] - should receive from a channel until done
2327[info] - should not receive from a channel in case of an error
2328[info] rendezvous channel
2329[info] - should wait until elements are transmitted
2330[info] - should select a send when a receive is waiting
2331[info] - should select a send or receive depending on availability
2332[info] default
2333[info] - should use the default value if the clauses are not satisfiable
2334[info] - should not use the default value if a clause is satisfiable
2335[info] - should not use the default value if the channel is done
2336[info] - should use the default value once a source is done (buffered channel, stress test)
2337[info] + iteration 1
[info] + (iterations 2-99 elided)
2436[info] + iteration 100
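The select behaviour covered by ChannelTest, in minimal form; Channel.buffered, receiveClause and the Received extractor are as in ox's channel docs:

    import ox.channels.*

    @main def selectDemo(): Unit =
      val c1 = Channel.buffered[Int](1)
      val c2 = Channel.buffered[Int](1)
      c2.send(2) // a value is immediately available on c2
      // picks a satisfiable receive clause; here c2's
      select(c1.receiveClause, c2.receiveClause) match
        case c1.Received(v) => println(s"from c1: $v")
        case c2.Received(v) => println(s"from c2: $v")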
2437[info] FlowOpsOrElseTest:
2438[info] orElse
2439[info] - should emit elements only from the original source when it is not empty
2440[info] - should emit elements only from the alternative source when the original source is created empty
2441[info] - should emit elements only from the alternative source when the original source is empty
2442[info] - should return failed source when the original source is failed
2443[info] FlowOpsForeachTest:
2444[info] foreach
2445[info] - should iterate over a flow
2446[info] - should convert flow to a list
2447[info] SourceOpsEmptyTest:
2448[info] Source.empty
2449[info] - should be done
2450[info] - should be empty
2451[info] FlowOpsFlattenParTest:
2452[info] flattenPar
2453[info] - should pipe all elements of the child flows into the output flow
2454[info] - should handle empty flow
2455[info] - should handle singleton flow
2456[info] - should not flatten nested flows
2457[info] - should handle subsequent flatten calls
2458[info] - should run at most parallelism child flows
2459[info] - should pipe elements realtime
2460[info] - should propagate error of any of the child flows and stop piping
2461[info] - should propagate error of the parent flow and stop piping
2462[info] FlowOpsRetryTest:
2463[info] Flow.retry
2464[info] - should successfully run a flow without retries when no errors occur
2465[info] - should retry a failing flow with immediate schedule
2466[info] - should retry a failing flow with fixed interval schedule
2467[info] - should not retry a flow which fails downstream
2468[info] - should fail after exhausting all retry attempts
2469[info] - should use custom ResultPolicy to determine retry worthiness
2470[info] - should handle empty flows correctly
2471[info] - should handle flows that complete successfully on first attempt
2472[info] - should retry the entire flow when processing fails
2473[info] - should work with complex flows containing transformations
2474[info] - should not retry a flow which uses .take and control exceptions
2475[info] LocalTest:
2476[info] fork locals
2477[2026-01-13T14:20:41.502698223Z] [24] main mid
2478[2026-01-13T14:20:41.603262204Z] [313242] In f1 = x
2479[2026-01-13T14:20:41.603772486Z] [24] result = a
2480[2026-01-13T14:20:41.703601374Z] [313245] In f3 = z
2481[2026-01-13T14:20:41.703910327Z] [24] result = a
2482[info] - should properly propagate values using supervisedWhere
2483[2026-01-13T14:20:41.705869369Z] [24] main mid
2484[2026-01-13T14:20:41.806499424Z] [313246] In f1 = x
2485[2026-01-13T14:20:41.806761559Z] [24] result = a
2486[2026-01-13T14:20:41.907321416Z] [313248] In f3 = z
2487[2026-01-13T14:20:41.907614764Z] [24] result = a
2488[info] - should properly propagate values using unsupervisedWhere
2489[2026-01-13T14:20:41.909173598Z] [313250] nested1 = x
2490[2026-01-13T14:20:41.909603230Z] [313251] nested2 = x
2491[2026-01-13T14:20:41.909743422Z] [24] outer = a
2492[info] - should propagate values across multiple scopes
2493[info] - should propagate errors from forks created within local values
2494[2026-01-13T14:20:41.911429491Z] [24] v1
2495[2026-01-13T14:20:41.911758118Z] [24] v2
2496[2026-01-13T14:20:41.911854117Z] [24] RuntimeException
2497[2026-01-13T14:20:41.911901445Z] [24] v1
2498[info] - should correctly set & unset fork locals when an exception is thrown
2499[2026-01-13T14:20:41.912385781Z] [24] v1_1
2500[2026-01-13T14:20:41.912780318Z] [24] v2_1
2501[2026-01-13T14:20:41.913240037Z] [24] v1_2
2502[2026-01-13T14:20:41.913327521Z] [24] v2_2
2503[2026-01-13T14:20:41.913373056Z] [24] v1_1
2504[2026-01-13T14:20:41.913413101Z] [24] v2_1
2505[info] - should correctly set & unset multiple fork locals
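Fork locals scope a value to all forks within a block; supervisedWhere is the method named in the tests above, and ForkLocal is ox's documented carrier (signature inferred, so a sketch):

    import ox.*

    @main def forkLocalDemo(): Unit =
      val local = ForkLocal("a")
      local.supervisedWhere("x") {
        val f = fork { local.get() } // forks in this scope see "x"
        println(f.join()) // x
      }
      println(local.get()) // back to the default: a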
2506[info] FlowOpsSampleTest:
2507[info] sample
2508[info] - should not sample anything from an empty flow
2509[info] - should not sample anything when 'n == 0'
2510[info] - should sample every element of the flow when 'n == 1'
2511[info] - should sample every nth element of the flow
2512[info] FlowOpsDrainTest:
2513[info] drain
2514[info] - should drain all elements
2515[info] - should run any side-effects that are part of the flow
2516[info] - should merge with another flow
2517[info] ActorTest:
2518[info] - should invoke methods on the actor
2519[info] - should protect the internal state of the actor
2520[info] - should run the close callback before re-throwing the exception
2521[info] - should end the scope when an exception is thrown when handling .tell
2522[info] - should throw a channel closed exception when the actor's scope becomes closed
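ox actors confine mutable state to a single thread and expose it via ask/tell; a sketch assuming Actor.create and ask as in ox's actor docs (Counter is a made-up logic class):

    import ox.*
    import ox.channels.Actor

    class Counter: // hypothetical stateful logic
      private var n = 0
      def increment(by: Int): Int = { n += by; n }

    @main def actorDemo(): Unit =
      supervised {
        val ref = Actor.create(Counter())
        println(ref.ask(_.increment(5))) // 5
        println(ref.ask(_.increment(2))) // 7; state is confined to the actor
      }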
2523[info] FlowOpsSlidingTest:
2524[info] sliding
2525[info] - should create sliding windows for n = 2 and step = 1
2526[info] - should create sliding windows for n = 3 and step = 1
2527[info] - should create sliding windows for n = 2 and step = 2
2528[info] - should create sliding windows for n = 3 and step = 2
2529[info] - should create sliding windows for n = 1 and step = 2
2530[info] - should create sliding windows for n = 2 and step = 3
2531[info] - should create sliding windows for n = 2 and step = 3 (with 1 element remaining in the end)
2532[info] - should return failed source when the original source is failed
2533[info] FlowOpsTickTest:
2534[info] - should tick regularly
2535[info] - should tick immediately in case of a slow consumer, and then resume normally
2536Starting build for ProjectRef(file:/build/repo/,cron) (cron)... [3/6]
2537Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
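These flags combine the project's own settings with the community-build overrides (the -Wconf silencer and -source:3.8). In a plain build they would be declared roughly as follows in build.sbt; an illustrative subset, not the wrapper's actual mechanism:

    // build.sbt (illustrative subset of the options printed above)
    scalacOptions ++= Seq(
      "-Xkind-projector",
      "-Wvalue-discard",
      "-Wnonunit-statement",
      "-Wunused:imports",
      "-source:3.8"
    )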
2538[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.8.0/classes ...
2539[info] done compiling
2540[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.8.0/test-classes ...
2541[info] done compiling
2542[info] CronScheduleTest:
2543[info] repeat with cron schedule
2544[info] - should repeat a function every second (once)
2545[info] - should repeat a function every second (three times)
2546[info] - should provide initial delay
2547Starting build for ProjectRef(file:/build/repo/,otelContext) (otel-context)... [4/6]
2548Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2549[info] compiling 1 Scala source to /build/repo/otel-context/target/scala-3.8.0/classes ...
2550[info] done compiling
2551Starting build for ProjectRef(file:/build/repo/,kafka) (kafka)... [5/6]
2552Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2553[info] compiling 9 Scala sources to /build/repo/kafka/target/scala-3.8.0/classes ...
2554[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaConsumerWrapper.scala:45:14
2555[warn] 45 | def close(wrapper: KafkaConsumerWrapper[K, V]): Unit = if closeWhenComplete then
2556[warn] | ^^^^^^^
2557[warn] | unused explicit parameter
2558[warn] one warning found
2559[info] done compiling
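The E198 warning above flags an unused explicit parameter under -Wunused:explicits; the standard fix, if the parameter must stay, is scala.annotation.unused. A hypothetical reduction of the flagged signature:

    import scala.annotation.unused

    // hypothetical reduction of KafkaConsumerWrapper.scala:45
    def close(@unused wrapper: AnyRef): Unit = ()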
2560[info] compiling 6 Scala sources to /build/repo/kafka/target/scala-3.8.0/test-classes ...
2561[info] done compiling
256215:20:53.877 [pool-67-thread-3] INFO o.a.k.c.c.AbstractConfig - KafkaConfig values:
2563 add.partitions.to.txn.retry.backoff.max.ms = 100
2564 add.partitions.to.txn.retry.backoff.ms = 20
2565 advertised.listeners = BROKER://localhost:6001
2566 alter.config.policy.class.name = null
2567 alter.log.dirs.replication.quota.window.num = 11
2568 alter.log.dirs.replication.quota.window.size.seconds = 1
2569 authorizer.class.name =
2570 auto.create.topics.enable = true
2571 auto.leader.rebalance.enable = true
2572 background.threads = 10
2573 broker.heartbeat.interval.ms = 2000
2574 broker.id = 0
2575 broker.rack = null
2576 broker.session.timeout.ms = 9000
2577 client.quota.callback.class = null
2578 compression.gzip.level = -1
2579 compression.lz4.level = 9
2580 compression.type = producer
2581 compression.zstd.level = 3
2582 connection.failed.authentication.delay.ms = 100
2583 connections.max.idle.ms = 600000
2584 connections.max.reauth.ms = 0
2585 controlled.shutdown.enable = true
2586 controller.listener.names = CONTROLLER
2587 controller.performance.always.log.threshold.ms = 2000
2588 controller.performance.sample.period.ms = 60000
2589 controller.quorum.append.linger.ms = 25
2590 controller.quorum.bootstrap.servers = []
2591 controller.quorum.election.backoff.max.ms = 1000
2592 controller.quorum.election.timeout.ms = 1000
2593 controller.quorum.fetch.timeout.ms = 2000
2594 controller.quorum.request.timeout.ms = 2000
2595 controller.quorum.retry.backoff.ms = 20
2596 controller.quorum.voters = [0@localhost:6002]
2597 controller.quota.window.num = 11
2598 controller.quota.window.size.seconds = 1
2599 controller.socket.timeout.ms = 30000
2600 create.topic.policy.class.name = null
2601 default.replication.factor = 1
2602 delegation.token.expiry.check.interval.ms = 3600000
2603 delegation.token.expiry.time.ms = 86400000
2604 delegation.token.max.lifetime.ms = 604800000
2605 delegation.token.secret.key = null
2606 delete.records.purgatory.purge.interval.requests = 1
2607 delete.topic.enable = true
2608 early.start.listeners = null
2609 fetch.max.bytes = 57671680
2610 fetch.purgatory.purge.interval.requests = 1000
2611 group.consumer.assignors = [uniform, range]
2612 group.consumer.heartbeat.interval.ms = 5000
2613 group.consumer.max.heartbeat.interval.ms = 15000
2614 group.consumer.max.session.timeout.ms = 60000
2615 group.consumer.max.size = 2147483647
2616 group.consumer.migration.policy = bidirectional
2617 group.consumer.min.heartbeat.interval.ms = 5000
2618 group.consumer.min.session.timeout.ms = 45000
2619 group.consumer.regex.refresh.interval.ms = 600000
2620 group.consumer.session.timeout.ms = 45000
2621 group.coordinator.append.linger.ms = 5
2622 group.coordinator.rebalance.protocols = [classic, consumer, streams]
2623 group.coordinator.threads = 4
2624 group.initial.rebalance.delay.ms = 3000
2625 group.max.session.timeout.ms = 1800000
2626 group.max.size = 2147483647
2627 group.min.session.timeout.ms = 6000
2628 group.share.assignors = [simple]
2629 group.share.delivery.count.limit = 5
2630 group.share.enable = false
2631 group.share.heartbeat.interval.ms = 5000
2632 group.share.max.heartbeat.interval.ms = 15000
2633 group.share.max.record.lock.duration.ms = 60000
2634 group.share.max.session.timeout.ms = 60000
2635 group.share.max.share.sessions = 2000
2636 group.share.max.size = 200
2637 group.share.min.heartbeat.interval.ms = 5000
2638 group.share.min.record.lock.duration.ms = 15000
2639 group.share.min.session.timeout.ms = 45000
2640 group.share.partition.max.record.locks = 2000
2641 group.share.persister.class.name = org.apache.kafka.server.share.persister.DefaultStatePersister
2642 group.share.record.lock.duration.ms = 30000
2643 group.share.session.timeout.ms = 45000
2644 group.streams.heartbeat.interval.ms = 5000
2645 group.streams.max.heartbeat.interval.ms = 15000
2646 group.streams.max.session.timeout.ms = 60000
2647 group.streams.max.size = 2147483647
2648 group.streams.max.standby.replicas = 2
2649 group.streams.min.heartbeat.interval.ms = 5000
2650 group.streams.min.session.timeout.ms = 45000
2651 group.streams.num.standby.replicas = 0
2652 group.streams.session.timeout.ms = 45000
2653 initial.broker.registration.timeout.ms = 60000
2654 inter.broker.listener.name = BROKER
2655 internal.metadata.delete.delay.millis = 60000
2656 internal.metadata.log.segment.bytes = null
2657 internal.metadata.max.batch.size.in.bytes = 8388608
2658 internal.metadata.max.fetch.size.in.bytes = 8388608
2659 kafka.metrics.polling.interval.secs = 10
2660 kafka.metrics.reporters = []
2661 leader.imbalance.check.interval.seconds = 300
2662 listener.security.protocol.map = BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT
2663 listeners = BROKER://localhost:6001,CONTROLLER://localhost:6002
2664 log.cleaner.backoff.ms = 15000
2665 log.cleaner.dedupe.buffer.size = 1048577
2666 log.cleaner.delete.retention.ms = 86400000
2667 log.cleaner.enable = true
2668 log.cleaner.io.buffer.load.factor = 0.9
2669 log.cleaner.io.buffer.size = 524288
2670 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
2671 log.cleaner.max.compaction.lag.ms = 9223372036854775807
2672 log.cleaner.min.cleanable.ratio = 0.5
2673 log.cleaner.min.compaction.lag.ms = 0
2674 log.cleaner.threads = 1
2675 log.cleanup.policy = [delete]
2676 log.dir = /tmp/kafka-logs
2677 log.dir.failure.timeout.ms = 30000
2678 log.dirs = /tmp/kafka-logs5982689497894266552
2679 log.flush.interval.messages = 1
2680 log.flush.interval.ms = null
2681 log.flush.offset.checkpoint.interval.ms = 60000
2682 log.flush.scheduler.interval.ms = 9223372036854775807
2683 log.flush.start.offset.checkpoint.interval.ms = 60000
2684 log.index.interval.bytes = 4096
2685 log.index.size.max.bytes = 10485760
2686 log.initial.task.delay.ms = 30000
2687 log.local.retention.bytes = -2
2688 log.local.retention.ms = -2
2689 log.message.timestamp.after.max.ms = 3600000
2690 log.message.timestamp.before.max.ms = 9223372036854775807
2691 log.message.timestamp.type = CreateTime
2692 log.preallocate = false
2693 log.retention.bytes = -1
2694 log.retention.check.interval.ms = 300000
2695 log.retention.hours = 168
2696 log.retention.minutes = null
2697 log.retention.ms = null
2698 log.roll.hours = 168
2699 log.roll.jitter.hours = 0
2700 log.roll.jitter.ms = null
2701 log.roll.ms = null
2702 log.segment.bytes = 1073741824
2703 log.segment.delete.delay.ms = 60000
2704 max.connection.creation.rate = 2147483647
2705 max.connections = 2147483647
2706 max.connections.per.ip = 2147483647
2707 max.connections.per.ip.overrides =
2708 max.incremental.fetch.session.cache.slots = 1000
2709 max.request.partition.size.limit = 2000
2710 message.max.bytes = 1048588
2711 metadata.log.dir = null
2712 metadata.log.max.record.bytes.between.snapshots = 20971520
2713 metadata.log.max.snapshot.interval.ms = 3600000
2714 metadata.log.segment.bytes = 1073741824
2715 metadata.log.segment.ms = 604800000
2716 metadata.max.idle.interval.ms = 500
2717 metadata.max.retention.bytes = 104857600
2718 metadata.max.retention.ms = 604800000
2719 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
2720 metrics.num.samples = 2
2721 metrics.recording.level = INFO
2722 metrics.sample.window.ms = 30000
2723 min.insync.replicas = 1
2724 node.id = 0
2725 num.io.threads = 8
2726 num.network.threads = 3
2727 num.partitions = 1
2728 num.recovery.threads.per.data.dir = 2
2729 num.replica.alter.log.dirs.threads = null
2730 num.replica.fetchers = 1
2731 offset.metadata.max.bytes = 4096
2732 offsets.commit.timeout.ms = 5000
2733 offsets.load.buffer.size = 5242880
2734 offsets.retention.check.interval.ms = 600000
2735 offsets.retention.minutes = 10080
2736 offsets.topic.compression.codec = 0
2737 offsets.topic.num.partitions = 1
2738 offsets.topic.replication.factor = 1
2739 offsets.topic.segment.bytes = 104857600
2740 principal.builder.class = class org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
2741 process.roles = [broker, controller]
2742 producer.id.expiration.check.interval.ms = 600000
2743 producer.id.expiration.ms = 86400000
2744 producer.purgatory.purge.interval.requests = 1000
2745 queued.max.request.bytes = -1
2746 queued.max.requests = 500
2747 quota.window.num = 11
2748 quota.window.size.seconds = 1
2749 remote.fetch.max.wait.ms = 500
2750 remote.list.offsets.request.timeout.ms = 30000
2751 remote.log.index.file.cache.total.size.bytes = 1073741824
2752 remote.log.manager.copier.thread.pool.size = 10
2753 remote.log.manager.copy.max.bytes.per.second = 9223372036854775807
2754 remote.log.manager.copy.quota.window.num = 11
2755 remote.log.manager.copy.quota.window.size.seconds = 1
2756 remote.log.manager.expiration.thread.pool.size = 10
2757 remote.log.manager.fetch.max.bytes.per.second = 9223372036854775807
2758 remote.log.manager.fetch.quota.window.num = 11
2759 remote.log.manager.fetch.quota.window.size.seconds = 1
2760 remote.log.manager.task.interval.ms = 30000
2761 remote.log.manager.task.retry.backoff.max.ms = 30000
2762 remote.log.manager.task.retry.backoff.ms = 500
2763 remote.log.manager.task.retry.jitter = 0.2
2764 remote.log.manager.thread.pool.size = 2
2765 remote.log.metadata.custom.metadata.max.bytes = 128
2766 remote.log.metadata.manager.class.name = org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager
2767 remote.log.metadata.manager.class.path = null
2768 remote.log.metadata.manager.impl.prefix = rlmm.config.
2769 remote.log.metadata.manager.listener.name = null
2770 remote.log.reader.max.pending.tasks = 100
2771 remote.log.reader.threads = 10
2772 remote.log.storage.manager.class.name = null
2773 remote.log.storage.manager.class.path = null
2774 remote.log.storage.manager.impl.prefix = rsm.config.
2775 remote.log.storage.system.enable = false
2776 replica.fetch.backoff.ms = 1000
2777 replica.fetch.max.bytes = 1048576
2778 replica.fetch.min.bytes = 1
2779 replica.fetch.response.max.bytes = 10485760
2780 replica.fetch.wait.max.ms = 500
2781 replica.high.watermark.checkpoint.interval.ms = 5000
2782 replica.lag.time.max.ms = 30000
2783 replica.selector.class = null
2784 replica.socket.receive.buffer.bytes = 65536
2785 replica.socket.timeout.ms = 30000
2786 replication.quota.window.num = 11
2787 replication.quota.window.size.seconds = 1
2788 request.timeout.ms = 30000
2789 sasl.client.callback.handler.class = null
2790 sasl.enabled.mechanisms = [GSSAPI]
2791 sasl.jaas.config = null
2792 sasl.kerberos.kinit.cmd = /usr/bin/kinit
2793 sasl.kerberos.min.time.before.relogin = 60000
2794 sasl.kerberos.principal.to.local.rules = [DEFAULT]
2795 sasl.kerberos.service.name = null
2796 sasl.kerberos.ticket.renew.jitter = 0.05
2797 sasl.kerberos.ticket.renew.window.factor = 0.8
2798 sasl.login.callback.handler.class = null
2799 sasl.login.class = null
2800 sasl.login.connect.timeout.ms = null
2801 sasl.login.read.timeout.ms = null
2802 sasl.login.refresh.buffer.seconds = 300
2803 sasl.login.refresh.min.period.seconds = 60
2804 sasl.login.refresh.window.factor = 0.8
2805 sasl.login.refresh.window.jitter = 0.05
2806 sasl.login.retry.backoff.max.ms = 10000
2807 sasl.login.retry.backoff.ms = 100
2808 sasl.mechanism.controller.protocol = GSSAPI
2809 sasl.mechanism.inter.broker.protocol = GSSAPI
2810 sasl.oauthbearer.assertion.algorithm = RS256
2811 sasl.oauthbearer.assertion.claim.aud = null
2812 sasl.oauthbearer.assertion.claim.exp.seconds = 300
2813 sasl.oauthbearer.assertion.claim.iss = null
2814 sasl.oauthbearer.assertion.claim.jti.include = false
2815 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
2816 sasl.oauthbearer.assertion.claim.sub = null
2817 sasl.oauthbearer.assertion.file = null
2818 sasl.oauthbearer.assertion.private.key.file = null
2819 sasl.oauthbearer.assertion.private.key.passphrase = null
2820 sasl.oauthbearer.assertion.template.file = null
2821 sasl.oauthbearer.client.credentials.client.id = null
2822 sasl.oauthbearer.client.credentials.client.secret = null
2823 sasl.oauthbearer.clock.skew.seconds = 30
2824 sasl.oauthbearer.expected.audience = null
2825 sasl.oauthbearer.expected.issuer = null
2826 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
2827 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
2828 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
2829 sasl.oauthbearer.jwks.endpoint.url = null
2830 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
2831 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
2832 sasl.oauthbearer.scope = null
2833 sasl.oauthbearer.scope.claim.name = scope
2834 sasl.oauthbearer.sub.claim.name = sub
2835 sasl.oauthbearer.token.endpoint.url = null
2836 sasl.server.callback.handler.class = null
2837 sasl.server.max.receive.size = 524288
2838 security.inter.broker.protocol = PLAINTEXT
2839 security.providers = null
2840 server.max.startup.time.ms = 9223372036854775807
2841 share.coordinator.append.linger.ms = 5
2842 share.coordinator.cold.partition.snapshot.interval.ms = 300000
2843 share.coordinator.load.buffer.size = 5242880
2844 share.coordinator.snapshot.update.records.per.snapshot = 500
2845 share.coordinator.state.topic.compression.codec = 0
2846 share.coordinator.state.topic.min.isr = 2
2847 share.coordinator.state.topic.num.partitions = 50
2848 share.coordinator.state.topic.prune.interval.ms = 300000
2849 share.coordinator.state.topic.replication.factor = 3
2850 share.coordinator.state.topic.segment.bytes = 104857600
2851 share.coordinator.threads = 1
2852 share.coordinator.write.timeout.ms = 5000
2853 share.fetch.purgatory.purge.interval.requests = 1000
2854 socket.connection.setup.timeout.max.ms = 30000
2855 socket.connection.setup.timeout.ms = 10000
2856 socket.listen.backlog.size = 50
2857 socket.receive.buffer.bytes = 102400
2858 socket.request.max.bytes = 104857600
2859 socket.send.buffer.bytes = 102400
2860 ssl.allow.dn.changes = false
2861 ssl.allow.san.changes = false
2862 ssl.cipher.suites = []
2863 ssl.client.auth = none
2864 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
2865 ssl.endpoint.identification.algorithm = https
2866 ssl.engine.factory.class = null
2867 ssl.key.password = null
2868 ssl.keymanager.algorithm = SunX509
2869 ssl.keystore.certificate.chain = null
2870 ssl.keystore.key = null
2871 ssl.keystore.location = null
2872 ssl.keystore.password = null
2873 ssl.keystore.type = JKS
2874 ssl.principal.mapping.rules = DEFAULT
2875 ssl.protocol = TLSv1.3
2876 ssl.provider = null
2877 ssl.secure.random.implementation = null
2878 ssl.trustmanager.algorithm = PKIX
2879 ssl.truststore.certificates = null
2880 ssl.truststore.location = null
2881 ssl.truststore.password = null
2882 ssl.truststore.type = JKS
2883 telemetry.max.bytes = 1048576
2884 transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
2885 transaction.max.timeout.ms = 900000
2886 transaction.partition.verification.enable = true
2887 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
2888 transaction.state.log.load.buffer.size = 5242880
2889 transaction.state.log.min.isr = 1
2890 transaction.state.log.num.partitions = 50
2891 transaction.state.log.replication.factor = 1
2892 transaction.state.log.segment.bytes = 104857600
2893 transaction.two.phase.commit.enable = false
2894 transactional.id.expiration.ms = 604800000
2895 unclean.leader.election.enable = false
2896 unclean.leader.election.interval.ms = 300000
2897 unstable.api.versions.enable = false
2898 unstable.feature.versions.enable = false
2899
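The embedded broker advertises BROKER://localhost:6001 (see the listeners entries above), so test clients connect like any other Kafka client. A minimal sketch with the standard Java producer API; the topic name is made up:

    import java.util.Properties
    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

    @main def producerDemo(): Unit =
      val props = new Properties()
      props.put("bootstrap.servers", "localhost:6001")
      props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
      props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
      val producer = new KafkaProducer[String, String](props)
      producer.send(new ProducerRecord("test-topic", "key", "value")).get() // block until acked
      producer.close()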
290015:20:54.083 [pool-67-thread-3] INFO k.u.Log4jControllerRegistration$ - Registered `kafka:type=kafka.Log4jController` MBean
290115:20:54.131 [pool-67-thread-3] INFO i.g.e.EmbeddedKafka$ - [KafkaRaftServer nodeId=0] Rewriting /tmp/kafka-logs5982689497894266552/meta.properties
290215:20:54.184 [pool-67-thread-3] INFO k.s.ControllerServer - [ControllerServer id=0] Starting controller
290315:20:54.587 [pool-67-thread-3] INFO k.n.ConnectionQuotas - Updated connection-accept-rate max connection creation rate to 2147483647
290415:20:54.620 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(CONTROLLER)
290515:20:54.625 [pool-67-thread-3] INFO o.a.k.s.n.EndpointReadyFutures - authorizerStart completed for endpoint CONTROLLER. Endpoint is now READY.
290615:20:54.627 [pool-67-thread-3] INFO k.s.SharedServer - [SharedServer id=0] Starting SharedServer
290715:20:54.675 [pool-67-thread-3] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
290815:20:54.676 [pool-67-thread-3] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs5982689497894266552] Reloading from producer snapshot and rebuilding producer state from offset 0
290915:20:54.676 [pool-67-thread-3] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs5982689497894266552] Producer state recovery took 0ms for snapshot load and 0ms for segment recovery from offset 0
291015:20:54.694 [pool-67-thread-3] INFO k.r.KafkaMetadataLog$ - Initialized snapshots with IDs SortedSet() from /tmp/kafka-logs5982689497894266552/__cluster_metadata-0
291115:20:54.705 [raft-expiration-reaper] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Starting
291215:20:54.718 [pool-67-thread-3] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Reading KRaft snapshot and log as part of the initialization
291315:20:54.720 [pool-67-thread-3] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Starting voters are VoterSet(voters={0=VoterNode(voterKey=ReplicaKey(id=0, directoryId=<undefined>), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/127.0.0.1:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:0])})
291415:20:54.723 [pool-67-thread-3] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Starting request manager with static voters: [localhost:6002 (id: 0 rack: null isFenced: false)]
291515:20:54.726 [pool-67-thread-3] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1606, highWatermark=Optional.empty) from null
291615:20:54.798 [pool-67-thread-3] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1606, highWatermark=Optional.empty) from null
291715:20:54.802 [pool-67-thread-3] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1146, highWatermark=Optional.empty) from UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1606, highWatermark=Optional.empty)
291815:20:54.803 [pool-67-thread-3] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to CandidateState(localId=0, localDirectoryId=bfxY4JLWDJqXknmblZ20cw, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1907) from ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1146, highWatermark=Optional.empty)
291915:20:54.808 [pool-67-thread-3] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to CandidateState(localId=0, localDirectoryId=bfxY4JLWDJqXknmblZ20cw, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1907) from ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1146, highWatermark=Optional.empty)
292015:20:54.814 [pool-67-thread-3] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to Leader(localVoterNode=VoterNode(voterKey=ReplicaKey(id=0, directoryId=bfxY4JLWDJqXknmblZ20cw), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/<unresolved>:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:1]), epoch=1, epochStartOffset=0, highWatermark=Optional.empty, voterStates={0=ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional.empty, lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)}) from CandidateState(localId=0, localDirectoryId=bfxY4JLWDJqXknmblZ20cw, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1907)
292115:20:54.815 [pool-67-thread-3] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to Leader(localVoterNode=VoterNode(voterKey=ReplicaKey(id=0, directoryId=bfxY4JLWDJqXknmblZ20cw), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/<unresolved>:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:1]), epoch=1, epochStartOffset=0, highWatermark=Optional.empty, voterStates={0=ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional.empty, lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)}) from CandidateState(localId=0, localDirectoryId=bfxY4JLWDJqXknmblZ20cw, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1907)
292215:20:54.833 [kafka-0-raft-outbound-request-thread] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Starting
292315:20:54.833 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Starting
292415:20:54.848 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] initializeNewPublishers: the loader is still catching up because we still don't know the high water mark yet.
292515:20:54.850 [pool-67-thread-3] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for controller quorum voters future
292615:20:54.850 [pool-67-thread-3] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for controller quorum voters future
292715:20:54.857 [kafka-0-raft-io-thread] INFO o.a.k.r.LeaderState - [RaftManager id=0] High watermark set to LogOffsetMetadata(offset=1, metadata=Optional[(segmentBaseOffset=0,relativePositionInSegment=91)]) for the first time for epoch 1 based on indexOfHw 0 and voters [ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional[LogOffsetMetadata(offset=1, metadata=Optional[(segmentBaseOffset=0,relativePositionInSegment=91)])], lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)]
292815:20:54.863 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Registered the listener org.apache.kafka.image.loader.MetadataLoader@1672530426
292915:20:54.865 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Setting the next offset of org.apache.kafka.image.loader.MetadataLoader@1672530426 to 0 since there are no snapshots
293015:20:54.869 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] maybePublishMetadata(LOG_DELTA): The loader is still catching up because we have not loaded a controller record as of offset 0 and high water mark is 1
293115:20:54.891 [pool-67-thread-3] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task writeNoOpRecord to run every 500 ms
293215:20:54.891 [pool-67-thread-3] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task maybeFenceStaleBroker to run every 1125 ms
293315:20:54.892 [pool-67-thread-3] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task electPreferred to run every 300000 ms
293415:20:54.892 [pool-67-thread-3] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task electUnclean to run every 300000 ms
293515:20:54.892 [pool-67-thread-3] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task expireDelegationTokens to run every 3600000 ms
293615:20:54.892 [pool-67-thread-3] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task generatePeriodicPerformanceMessage to run every 60000 ms
293715:20:54.893 [pool-67-thread-3] INFO o.a.k.c.QuorumController - [QuorumController id=0] Creating new QuorumController with clusterId BJqwkK6mSo6OvprLaTJJCw
293815:20:54.893 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Registered the listener org.apache.kafka.controller.QuorumController$QuorumMetaLogListener@272237472
293915:20:54.894 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Setting the next offset of org.apache.kafka.controller.QuorumController$QuorumMetaLogListener@272237472 to 0 since there are no snapshots
294015:20:54.898 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] Becoming the active controller at epoch 1, next write offset 1.
294115:20:54.901 [controller-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Starting
294215:20:54.901 [controller-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Starting
294315:20:54.901 [controller-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Starting
294415:20:54.904 [quorum-controller-0-event-handler] WARN o.a.k.c.QuorumController - [QuorumController id=0] Performing controller activation. The metadata log appears to be empty. Appending 1 bootstrap record(s) in metadata transaction at metadata.version 4.1-IV1 from bootstrap source 'the default bootstrap'.
294515:20:54.905 [controller-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Starting
294615:20:54.907 [quorum-controller-0-event-handler] INFO o.a.k.c.OffsetControlManager - [QuorumController id=0] Replayed BeginTransactionRecord(name='Bootstrap records') at offset 1.
15:20:54.908 [quorum-controller-0-event-handler] INFO o.a.k.c.FeatureControlManager - [QuorumController id=0] Replayed a FeatureLevelRecord setting metadata.version to 4.1-IV1
15:20:54.908 [quorum-controller-0-event-handler] INFO o.a.k.c.OffsetControlManager - [QuorumController id=0] Replayed EndTransactionRecord() at offset 3.
15:20:54.909 [quorum-controller-0-event-handler] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Activated periodic tasks: electPreferred, electUnclean, expireDelegationTokens, generatePeriodicPerformanceMessage, maybeFenceStaleBroker, writeNoOpRecord
15:20:54.923 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Starting
15:20:54.940 [pool-67-thread-3] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for the controller metadata publishers to be installed
15:20:54.941 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] maybePublishMetadata(LOG_DELTA): The loader finished catching up to the current high water mark of 4
15:20:54.943 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing SnapshotGenerator with a snapshot at offset 3
15:20:54.943 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing KRaftMetadataCachePublisher with a snapshot at offset 3
15:20:54.943 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing FeaturesPublisher with a snapshot at offset 3
15:20:54.945 [pool-67-thread-3] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for the controller metadata publishers to be installed
15:20:54.946 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Enabling request processing.
15:20:54.957 [kafka-0-metadata-loader-event-handler] INFO o.a.k.m.p.FeaturesPublisher - [ControllerServer id=0] Loaded new metadata FinalizedFeatures[metadataVersion=4.1-IV1, finalizedFeatures={metadata.version=27}, finalizedFeaturesEpoch=3].
15:20:54.957 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerRegistrationsPublisher with a snapshot at offset 3
15:20:54.957 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerRegistrationManager with a snapshot at offset 3
15:20:54.958 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicConfigPublisher controller id=0 with a snapshot at offset 3
15:20:54.960 [pool-67-thread-3] INFO k.n.DataPlaneAcceptor - Awaiting socket connections on localhost:6002.
15:20:54.961 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicClientQuotaPublisher controller id=0 with a snapshot at offset 3
15:20:54.962 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicTopicClusterQuotaPublisher controller id=0 with a snapshot at offset 3
15:20:54.963 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ScramPublisher controller id=0 with a snapshot at offset 3
15:20:54.964 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DelegationTokenPublisher controller id=0 with a snapshot at offset 3
15:20:54.968 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerMetadataMetricsPublisher with a snapshot at offset 3
15:20:54.968 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing AclPublisher controller id=0 with a snapshot at offset 3
15:20:54.974 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Starting
15:20:54.975 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
15:20:54.974 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=otvYEWT7TdqjIMfAFNpAYw] initialized channel manager.
15:20:54.974 [pool-67-thread-3] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for all of the authorizer futures to be completed
15:20:54.976 [pool-67-thread-3] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for all of the authorizer futures to be completed
15:20:54.976 [pool-67-thread-3] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for all of the SocketServer Acceptors to be started
15:20:54.977 [pool-67-thread-3] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for all of the SocketServer Acceptors to be started
15:20:54.977 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from SHUTDOWN to STARTING
15:20:54.977 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Starting broker
15:20:54.983 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=otvYEWT7TdqjIMfAFNpAYw] sendControllerRegistration: attempting to send ControllerRegistrationRequestData(controllerId=0, incarnationId=otvYEWT7TdqjIMfAFNpAYw, zkMigrationReady=false, listeners=[Listener(name='CONTROLLER', host='localhost', port=6002, securityProtocol=0)], features=[Feature(name='group.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='transaction.version', minSupportedVersion=0, maxSupportedVersion=2), Feature(name='eligible.leader.replicas.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='kraft.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='metadata.version', minSupportedVersion=7, maxSupportedVersion=27), Feature(name='share.version', minSupportedVersion=0, maxSupportedVersion=1)])
15:20:54.989 [broker-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Starting
15:20:54.990 [broker-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Starting
15:20:54.991 [broker-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Starting
15:20:54.992 [broker-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Starting
15:20:55.011 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for controller quorum voters future
15:20:55.012 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for controller quorum voters future
15:20:55.014 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Starting
15:20:55.014 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
15:20:55.024 [client-metrics-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Starting
15:20:55.068 [pool-67-thread-3] INFO k.n.ConnectionQuotas - Updated connection-accept-rate max connection creation rate to 2147483647
15:20:55.073 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(BROKER)
15:20:55.083 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed RegisterControllerRecord containing ControllerRegistration(id=0, incarnationId=otvYEWT7TdqjIMfAFNpAYw, zkMigrationReady=false, listeners=[Endpoint(listenerName='CONTROLLER', securityProtocol=PLAINTEXT, host='localhost', port=6002)], supportedFeatures={eligible.leader.replicas.version: 0-1, group.version: 0-1, kraft.version: 0-1, metadata.version: 7-27, share.version: 0-1, transaction.version: 0-2}).
15:20:55.083 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Starting
15:20:55.084 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
15:20:55.088 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Starting
15:20:55.088 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
15:20:55.104 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=otvYEWT7TdqjIMfAFNpAYw] Our registration has been persisted to the metadata log.
15:20:55.105 [ExpirationReaper-0-Produce] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Starting
15:20:55.106 [controller-0-to-controller-registration-channel-manager] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=otvYEWT7TdqjIMfAFNpAYw] RegistrationResponseHandler: controller acknowledged ControllerRegistrationRequest.
15:20:55.106 [ExpirationReaper-0-Fetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Starting
15:20:55.107 [ExpirationReaper-0-DeleteRecords] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Starting
15:20:55.107 [ExpirationReaper-0-RemoteFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Starting
15:20:55.109 [ExpirationReaper-0-RemoteListOffsets] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Starting
15:20:55.109 [ExpirationReaper-0-ShareFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Starting
15:20:55.125 [share-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Starting
15:20:55.146 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Starting
15:20:55.151 [persister-state-manager-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Starting
15:20:55.153 [PersisterStateManager] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Starting
15:20:55.154 [group-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Starting
15:20:55.165 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Starting
15:20:55.165 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Starting
15:20:55.165 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Starting
15:20:55.166 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Starting
15:20:55.182 [pool-67-thread-3] INFO k.l.LogManager - Unable to read the broker epoch in /tmp/kafka-logs5982689497894266552.
15:20:55.183 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Starting
15:20:55.183 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
15:20:55.185 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Incarnation GynRwrzJSrixNIfOpXiGxA of broker 0 in cluster BJqwkK6mSo6OvprLaTJJCw is now STARTING.
15:20:55.192 [share-group-lock-timeout-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Starting
15:20:55.196 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] No previous registration found for broker 0. New incarnation ID is GynRwrzJSrixNIfOpXiGxA. Generated 0 record(s) to clean up previous incarnations. New broker epoch is 5.
15:20:55.202 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed initial RegisterBrokerRecord for broker 0: RegisterBrokerRecord(brokerId=0, isMigratingZkBroker=false, incarnationId=GynRwrzJSrixNIfOpXiGxA, brokerEpoch=5, endPoints=[BrokerEndpoint(name='BROKER', host='localhost', port=6001, securityProtocol=0)], features=[BrokerFeature(name='group.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='transaction.version', minSupportedVersion=0, maxSupportedVersion=2), BrokerFeature(name='eligible.leader.replicas.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='kraft.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='metadata.version', minSupportedVersion=7, maxSupportedVersion=27), BrokerFeature(name='share.version', minSupportedVersion=0, maxSupportedVersion=1)], rack=null, fenced=true, inControlledShutdown=false, logDirs=[bfxY4JLWDJqXknmblZ20cw])
15:20:55.207 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Starting
15:20:55.220 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the broker metadata publishers to be installed
15:20:55.220 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the broker metadata publishers to be installed
15:20:55.221 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the controller to acknowledge that we are caught up
15:20:55.221 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing MetadataVersionPublisher(id=0) with a snapshot at offset 4
15:20:55.221 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing BrokerMetadataPublisher with a snapshot at offset 4
15:20:55.224 [kafka-0-metadata-loader-event-handler] INFO k.s.m.BrokerMetadataPublisher - [BrokerMetadataPublisher id=0] Publishing initial metadata at offset OffsetAndEpoch[offset=4, epoch=1] with metadata.version Optional[4.1-IV1].
15:20:55.224 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Loading logs from log dirs ArrayBuffer(/tmp/kafka-logs5982689497894266552)
15:20:55.228 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - No logs found to be loaded in /tmp/kafka-logs5982689497894266552
15:20:55.230 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Successfully registered broker 0 with broker epoch 5
15:20:55.238 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Loaded 0 logs in 11ms
15:20:55.238 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Starting log cleanup with a period of 300000 ms.
15:20:55.239 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Starting log flusher with a default period of 9223372036854775807 ms.
15:20:55.244 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.LogCleaner - Starting the log cleaner
15:20:55.248 [kafka-log-cleaner-thread-0] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Starting
15:20:55.254 [LogDirFailureHandler] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Starting
15:20:55.254 [AddPartitionsToTxnSenderThread-0] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Starting
15:20:55.257 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Starting up.
15:20:55.258 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Startup complete.
15:20:55.258 [kafka-0-metadata-loader-event-handler] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Starting up.
15:20:55.259 [TxnMarkerSenderThread-0] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Starting
15:20:55.260 [kafka-0-metadata-loader-event-handler] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Startup complete.
15:20:55.260 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Starting up.
15:20:55.260 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Startup complete.
15:20:55.268 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing BrokerRegistrationTracker(id=0) with a snapshot at offset 4
15:20:55.277 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker has caught up. Transitioning from STARTING to RECOVERY.
15:20:55.277 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the controller to acknowledge that we are caught up
15:20:55.277 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the initial broker metadata update to be published
15:20:55.277 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the initial broker metadata update to be published
15:20:55.279 [pool-67-thread-3] INFO o.a.k.c.c.AbstractConfig - KafkaConfig values:
  add.partitions.to.txn.retry.backoff.max.ms = 100
  add.partitions.to.txn.retry.backoff.ms = 20
  advertised.listeners = BROKER://localhost:6001
  alter.config.policy.class.name = null
  alter.log.dirs.replication.quota.window.num = 11
  alter.log.dirs.replication.quota.window.size.seconds = 1
  authorizer.class.name =
  auto.create.topics.enable = true
  auto.leader.rebalance.enable = true
  background.threads = 10
  broker.heartbeat.interval.ms = 2000
  broker.id = 0
  broker.rack = null
  broker.session.timeout.ms = 9000
  client.quota.callback.class = null
  compression.gzip.level = -1
  compression.lz4.level = 9
  compression.type = producer
  compression.zstd.level = 3
  connection.failed.authentication.delay.ms = 100
  connections.max.idle.ms = 600000
  connections.max.reauth.ms = 0
  controlled.shutdown.enable = true
  controller.listener.names = CONTROLLER
  controller.performance.always.log.threshold.ms = 2000
  controller.performance.sample.period.ms = 60000
  controller.quorum.append.linger.ms = 25
  controller.quorum.bootstrap.servers = []
  controller.quorum.election.backoff.max.ms = 1000
  controller.quorum.election.timeout.ms = 1000
  controller.quorum.fetch.timeout.ms = 2000
  controller.quorum.request.timeout.ms = 2000
  controller.quorum.retry.backoff.ms = 20
  controller.quorum.voters = [0@localhost:6002]
  controller.quota.window.num = 11
  controller.quota.window.size.seconds = 1
  controller.socket.timeout.ms = 30000
  create.topic.policy.class.name = null
  default.replication.factor = 1
  delegation.token.expiry.check.interval.ms = 3600000
  delegation.token.expiry.time.ms = 86400000
  delegation.token.max.lifetime.ms = 604800000
  delegation.token.secret.key = null
  delete.records.purgatory.purge.interval.requests = 1
  delete.topic.enable = true
  early.start.listeners = null
  fetch.max.bytes = 57671680
  fetch.purgatory.purge.interval.requests = 1000
  group.consumer.assignors = [uniform, range]
  group.consumer.heartbeat.interval.ms = 5000
  group.consumer.max.heartbeat.interval.ms = 15000
  group.consumer.max.session.timeout.ms = 60000
  group.consumer.max.size = 2147483647
  group.consumer.migration.policy = bidirectional
  group.consumer.min.heartbeat.interval.ms = 5000
  group.consumer.min.session.timeout.ms = 45000
  group.consumer.regex.refresh.interval.ms = 600000
  group.consumer.session.timeout.ms = 45000
  group.coordinator.append.linger.ms = 5
  group.coordinator.rebalance.protocols = [classic, consumer, streams]
  group.coordinator.threads = 4
  group.initial.rebalance.delay.ms = 3000
  group.max.session.timeout.ms = 1800000
  group.max.size = 2147483647
  group.min.session.timeout.ms = 6000
  group.share.assignors = [simple]
  group.share.delivery.count.limit = 5
  group.share.enable = false
  group.share.heartbeat.interval.ms = 5000
  group.share.max.heartbeat.interval.ms = 15000
  group.share.max.record.lock.duration.ms = 60000
  group.share.max.session.timeout.ms = 60000
  group.share.max.share.sessions = 2000
  group.share.max.size = 200
  group.share.min.heartbeat.interval.ms = 5000
  group.share.min.record.lock.duration.ms = 15000
  group.share.min.session.timeout.ms = 45000
  group.share.partition.max.record.locks = 2000
  group.share.persister.class.name = org.apache.kafka.server.share.persister.DefaultStatePersister
  group.share.record.lock.duration.ms = 30000
  group.share.session.timeout.ms = 45000
  group.streams.heartbeat.interval.ms = 5000
  group.streams.max.heartbeat.interval.ms = 15000
  group.streams.max.session.timeout.ms = 60000
  group.streams.max.size = 2147483647
  group.streams.max.standby.replicas = 2
  group.streams.min.heartbeat.interval.ms = 5000
  group.streams.min.session.timeout.ms = 45000
  group.streams.num.standby.replicas = 0
  group.streams.session.timeout.ms = 45000
  initial.broker.registration.timeout.ms = 60000
  inter.broker.listener.name = BROKER
  internal.metadata.delete.delay.millis = 60000
  internal.metadata.log.segment.bytes = null
  internal.metadata.max.batch.size.in.bytes = 8388608
  internal.metadata.max.fetch.size.in.bytes = 8388608
  kafka.metrics.polling.interval.secs = 10
  kafka.metrics.reporters = []
  leader.imbalance.check.interval.seconds = 300
  listener.security.protocol.map = BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT
  listeners = BROKER://localhost:6001,CONTROLLER://localhost:6002
  log.cleaner.backoff.ms = 15000
  log.cleaner.dedupe.buffer.size = 1048577
  log.cleaner.delete.retention.ms = 86400000
  log.cleaner.enable = true
  log.cleaner.io.buffer.load.factor = 0.9
  log.cleaner.io.buffer.size = 524288
  log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
  log.cleaner.max.compaction.lag.ms = 9223372036854775807
  log.cleaner.min.cleanable.ratio = 0.5
  log.cleaner.min.compaction.lag.ms = 0
  log.cleaner.threads = 1
  log.cleanup.policy = [delete]
  log.dir = /tmp/kafka-logs
  log.dir.failure.timeout.ms = 30000
  log.dirs = /tmp/kafka-logs5982689497894266552
  log.flush.interval.messages = 1
  log.flush.interval.ms = null
  log.flush.offset.checkpoint.interval.ms = 60000
  log.flush.scheduler.interval.ms = 9223372036854775807
  log.flush.start.offset.checkpoint.interval.ms = 60000
  log.index.interval.bytes = 4096
  log.index.size.max.bytes = 10485760
  log.initial.task.delay.ms = 30000
  log.local.retention.bytes = -2
  log.local.retention.ms = -2
  log.message.timestamp.after.max.ms = 3600000
  log.message.timestamp.before.max.ms = 9223372036854775807
  log.message.timestamp.type = CreateTime
  log.preallocate = false
  log.retention.bytes = -1
  log.retention.check.interval.ms = 300000
  log.retention.hours = 168
  log.retention.minutes = null
  log.retention.ms = null
  log.roll.hours = 168
  log.roll.jitter.hours = 0
  log.roll.jitter.ms = null
  log.roll.ms = null
  log.segment.bytes = 1073741824
  log.segment.delete.delay.ms = 60000
  max.connection.creation.rate = 2147483647
  max.connections = 2147483647
  max.connections.per.ip = 2147483647
  max.connections.per.ip.overrides =
  max.incremental.fetch.session.cache.slots = 1000
  max.request.partition.size.limit = 2000
  message.max.bytes = 1048588
  metadata.log.dir = null
  metadata.log.max.record.bytes.between.snapshots = 20971520
  metadata.log.max.snapshot.interval.ms = 3600000
  metadata.log.segment.bytes = 1073741824
  metadata.log.segment.ms = 604800000
  metadata.max.idle.interval.ms = 500
  metadata.max.retention.bytes = 104857600
  metadata.max.retention.ms = 604800000
  metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
  metrics.num.samples = 2
  metrics.recording.level = INFO
  metrics.sample.window.ms = 30000
  min.insync.replicas = 1
  node.id = 0
  num.io.threads = 8
  num.network.threads = 3
  num.partitions = 1
  num.recovery.threads.per.data.dir = 2
  num.replica.alter.log.dirs.threads = null
  num.replica.fetchers = 1
  offset.metadata.max.bytes = 4096
  offsets.commit.timeout.ms = 5000
  offsets.load.buffer.size = 5242880
  offsets.retention.check.interval.ms = 600000
  offsets.retention.minutes = 10080
  offsets.topic.compression.codec = 0
  offsets.topic.num.partitions = 1
  offsets.topic.replication.factor = 1
  offsets.topic.segment.bytes = 104857600
  principal.builder.class = class org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
  process.roles = [broker, controller]
  producer.id.expiration.check.interval.ms = 600000
  producer.id.expiration.ms = 86400000
  producer.purgatory.purge.interval.requests = 1000
  queued.max.request.bytes = -1
  queued.max.requests = 500
  quota.window.num = 11
  quota.window.size.seconds = 1
  remote.fetch.max.wait.ms = 500
  remote.list.offsets.request.timeout.ms = 30000
  remote.log.index.file.cache.total.size.bytes = 1073741824
  remote.log.manager.copier.thread.pool.size = 10
  remote.log.manager.copy.max.bytes.per.second = 9223372036854775807
  remote.log.manager.copy.quota.window.num = 11
  remote.log.manager.copy.quota.window.size.seconds = 1
  remote.log.manager.expiration.thread.pool.size = 10
  remote.log.manager.fetch.max.bytes.per.second = 9223372036854775807
  remote.log.manager.fetch.quota.window.num = 11
  remote.log.manager.fetch.quota.window.size.seconds = 1
  remote.log.manager.task.interval.ms = 30000
  remote.log.manager.task.retry.backoff.max.ms = 30000
  remote.log.manager.task.retry.backoff.ms = 500
  remote.log.manager.task.retry.jitter = 0.2
  remote.log.manager.thread.pool.size = 2
  remote.log.metadata.custom.metadata.max.bytes = 128
  remote.log.metadata.manager.class.name = org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager
  remote.log.metadata.manager.class.path = null
  remote.log.metadata.manager.impl.prefix = rlmm.config.
  remote.log.metadata.manager.listener.name = null
  remote.log.reader.max.pending.tasks = 100
  remote.log.reader.threads = 10
  remote.log.storage.manager.class.name = null
  remote.log.storage.manager.class.path = null
  remote.log.storage.manager.impl.prefix = rsm.config.
  remote.log.storage.system.enable = false
  replica.fetch.backoff.ms = 1000
  replica.fetch.max.bytes = 1048576
  replica.fetch.min.bytes = 1
  replica.fetch.response.max.bytes = 10485760
  replica.fetch.wait.max.ms = 500
  replica.high.watermark.checkpoint.interval.ms = 5000
  replica.lag.time.max.ms = 30000
  replica.selector.class = null
  replica.socket.receive.buffer.bytes = 65536
  replica.socket.timeout.ms = 30000
  replication.quota.window.num = 11
  replication.quota.window.size.seconds = 1
  request.timeout.ms = 30000
  sasl.client.callback.handler.class = null
  sasl.enabled.mechanisms = [GSSAPI]
  sasl.jaas.config = null
  sasl.kerberos.kinit.cmd = /usr/bin/kinit
  sasl.kerberos.min.time.before.relogin = 60000
  sasl.kerberos.principal.to.local.rules = [DEFAULT]
  sasl.kerberos.service.name = null
  sasl.kerberos.ticket.renew.jitter = 0.05
  sasl.kerberos.ticket.renew.window.factor = 0.8
  sasl.login.callback.handler.class = null
  sasl.login.class = null
  sasl.login.connect.timeout.ms = null
  sasl.login.read.timeout.ms = null
  sasl.login.refresh.buffer.seconds = 300
  sasl.login.refresh.min.period.seconds = 60
  sasl.login.refresh.window.factor = 0.8
  sasl.login.refresh.window.jitter = 0.05
  sasl.login.retry.backoff.max.ms = 10000
  sasl.login.retry.backoff.ms = 100
  sasl.mechanism.controller.protocol = GSSAPI
  sasl.mechanism.inter.broker.protocol = GSSAPI
  sasl.oauthbearer.assertion.algorithm = RS256
  sasl.oauthbearer.assertion.claim.aud = null
  sasl.oauthbearer.assertion.claim.exp.seconds = 300
  sasl.oauthbearer.assertion.claim.iss = null
  sasl.oauthbearer.assertion.claim.jti.include = false
  sasl.oauthbearer.assertion.claim.nbf.seconds = 60
  sasl.oauthbearer.assertion.claim.sub = null
  sasl.oauthbearer.assertion.file = null
  sasl.oauthbearer.assertion.private.key.file = null
  sasl.oauthbearer.assertion.private.key.passphrase = null
  sasl.oauthbearer.assertion.template.file = null
  sasl.oauthbearer.client.credentials.client.id = null
  sasl.oauthbearer.client.credentials.client.secret = null
  sasl.oauthbearer.clock.skew.seconds = 30
  sasl.oauthbearer.expected.audience = null
  sasl.oauthbearer.expected.issuer = null
  sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
  sasl.oauthbearer.jwks.endpoint.url = null
  sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
  sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
  sasl.oauthbearer.scope = null
  sasl.oauthbearer.scope.claim.name = scope
  sasl.oauthbearer.sub.claim.name = sub
  sasl.oauthbearer.token.endpoint.url = null
  sasl.server.callback.handler.class = null
  sasl.server.max.receive.size = 524288
  security.inter.broker.protocol = PLAINTEXT
  security.providers = null
  server.max.startup.time.ms = 9223372036854775807
  share.coordinator.append.linger.ms = 5
  share.coordinator.cold.partition.snapshot.interval.ms = 300000
  share.coordinator.load.buffer.size = 5242880
  share.coordinator.snapshot.update.records.per.snapshot = 500
  share.coordinator.state.topic.compression.codec = 0
  share.coordinator.state.topic.min.isr = 2
  share.coordinator.state.topic.num.partitions = 50
  share.coordinator.state.topic.prune.interval.ms = 300000
  share.coordinator.state.topic.replication.factor = 3
  share.coordinator.state.topic.segment.bytes = 104857600
  share.coordinator.threads = 1
  share.coordinator.write.timeout.ms = 5000
  share.fetch.purgatory.purge.interval.requests = 1000
  socket.connection.setup.timeout.max.ms = 30000
  socket.connection.setup.timeout.ms = 10000
  socket.listen.backlog.size = 50
  socket.receive.buffer.bytes = 102400
  socket.request.max.bytes = 104857600
  socket.send.buffer.bytes = 102400
  ssl.allow.dn.changes = false
  ssl.allow.san.changes = false
  ssl.cipher.suites = []
  ssl.client.auth = none
  ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
  ssl.endpoint.identification.algorithm = https
  ssl.engine.factory.class = null
  ssl.key.password = null
  ssl.keymanager.algorithm = SunX509
  ssl.keystore.certificate.chain = null
  ssl.keystore.key = null
  ssl.keystore.location = null
  ssl.keystore.password = null
  ssl.keystore.type = JKS
  ssl.principal.mapping.rules = DEFAULT
  ssl.protocol = TLSv1.3
  ssl.provider = null
  ssl.secure.random.implementation = null
  ssl.trustmanager.algorithm = PKIX
  ssl.truststore.certificates = null
  ssl.truststore.location = null
  ssl.truststore.password = null
  ssl.truststore.type = JKS
  telemetry.max.bytes = 1048576
  transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
  transaction.max.timeout.ms = 900000
  transaction.partition.verification.enable = true
  transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
  transaction.state.log.load.buffer.size = 5242880
  transaction.state.log.min.isr = 1
  transaction.state.log.num.partitions = 50
  transaction.state.log.replication.factor = 1
  transaction.state.log.segment.bytes = 104857600
  transaction.two.phase.commit.enable = false
  transactional.id.expiration.ms = 604800000
  unclean.leader.election.enable = false
  unclean.leader.election.interval.ms = 300000
  unstable.api.versions.enable = false
  unstable.feature.versions.enable = false

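For readers reproducing this setup: only a handful of the values above deviate from broker defaults, and together they describe a single-node combined broker+controller KRaft cluster. A minimal sketch of those overrides as standard Kafka broker properties, in Scala (not part of this build; startEmbeddedBroker is a hypothetical stand-in for whatever harness boots the test broker, likely an embedded-Kafka library):

    import java.util.Properties

    // Sketch only: the non-default broker settings visible in the KafkaConfig dump above.
    val brokerProps = new Properties()
    brokerProps.put("process.roles", "broker,controller")            // combined KRaft node
    brokerProps.put("node.id", "0")
    brokerProps.put("controller.quorum.voters", "0@localhost:6002")
    brokerProps.put("listeners", "BROKER://localhost:6001,CONTROLLER://localhost:6002")
    brokerProps.put("advertised.listeners", "BROKER://localhost:6001")
    brokerProps.put("listener.security.protocol.map", "BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT")
    brokerProps.put("inter.broker.listener.name", "BROKER")
    brokerProps.put("controller.listener.names", "CONTROLLER")
    brokerProps.put("offsets.topic.replication.factor", "1")         // single node, so RF 1
    brokerProps.put("transaction.state.log.replication.factor", "1")
    brokerProps.put("log.flush.interval.messages", "1")              // flush after every record, as dumped
    // startEmbeddedBroker(brokerProps)                              // hypothetical helper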
15:20:55.287 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the broker to be unfenced
15:20:55.291 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker is in RECOVERY.
15:20:55.292 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] The request from broker 0 to unfence has been granted because it has caught up with the offset of its register broker record 5.
15:20:55.301 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=-1, inControlledShutdown=0, logDirs=[])
15:20:55.321 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker has been unfenced. Transitioning from RECOVERY to RUNNING.
15:20:55.322 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the broker to be unfenced
15:20:55.323 [pool-67-thread-3] INFO o.a.k.s.n.EndpointReadyFutures - authorizerStart completed for endpoint BROKER. Endpoint is now READY.
15:20:55.324 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Enabling request processing.
15:20:55.324 [pool-67-thread-3] INFO k.n.DataPlaneAcceptor - Awaiting socket connections on localhost:6001.
15:20:55.325 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for all of the authorizer futures to be completed
15:20:55.325 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for all of the authorizer futures to be completed
15:20:55.325 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for all of the SocketServer Acceptors to be started
15:20:55.325 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for all of the SocketServer Acceptors to be started
15:20:55.325 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from STARTING to STARTED
15:20:55.350 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
  acks = -1
  batch.size = 16384
  bootstrap.servers = [localhost:6001]
  buffer.memory = 33554432
  client.dns.lookup = use_all_dns_ips
  client.id = producer-1
  compression.gzip.level = -1
  compression.lz4.level = 9
  compression.type = none
  compression.zstd.level = 3
  connections.max.idle.ms = 540000
  delivery.timeout.ms = 120000
  enable.idempotence = true
  enable.metrics.push = true
  interceptor.classes = []
  key.serializer = class org.apache.kafka.common.serialization.StringSerializer
  linger.ms = 5
  max.block.ms = 10000
  max.in.flight.requests.per.connection = 5
  max.request.size = 1048576
  metadata.max.age.ms = 300000
  metadata.max.idle.ms = 300000
  metadata.recovery.rebootstrap.trigger.ms = 300000
  metadata.recovery.strategy = rebootstrap
  metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
  metrics.num.samples = 2
  metrics.recording.level = INFO
  metrics.sample.window.ms = 30000
  partitioner.adaptive.partitioning.enable = true
  partitioner.availability.timeout.ms = 0
  partitioner.class = null
  partitioner.ignore.keys = false
  receive.buffer.bytes = 32768
  reconnect.backoff.max.ms = 1000
  reconnect.backoff.ms = 50
  request.timeout.ms = 30000
  retries = 2147483647
  retry.backoff.max.ms = 1000
  retry.backoff.ms = 1000
  sasl.client.callback.handler.class = null
  sasl.jaas.config = null
  sasl.kerberos.kinit.cmd = /usr/bin/kinit
  sasl.kerberos.min.time.before.relogin = 60000
  sasl.kerberos.service.name = null
  sasl.kerberos.ticket.renew.jitter = 0.05
  sasl.kerberos.ticket.renew.window.factor = 0.8
  sasl.login.callback.handler.class = null
  sasl.login.class = null
  sasl.login.connect.timeout.ms = null
  sasl.login.read.timeout.ms = null
  sasl.login.refresh.buffer.seconds = 300
  sasl.login.refresh.min.period.seconds = 60
  sasl.login.refresh.window.factor = 0.8
  sasl.login.refresh.window.jitter = 0.05
  sasl.login.retry.backoff.max.ms = 10000
  sasl.login.retry.backoff.ms = 100
  sasl.mechanism = GSSAPI
  sasl.oauthbearer.assertion.algorithm = RS256
  sasl.oauthbearer.assertion.claim.aud = null
  sasl.oauthbearer.assertion.claim.exp.seconds = 300
  sasl.oauthbearer.assertion.claim.iss = null
  sasl.oauthbearer.assertion.claim.jti.include = false
  sasl.oauthbearer.assertion.claim.nbf.seconds = 60
  sasl.oauthbearer.assertion.claim.sub = null
  sasl.oauthbearer.assertion.file = null
  sasl.oauthbearer.assertion.private.key.file = null
  sasl.oauthbearer.assertion.private.key.passphrase = null
  sasl.oauthbearer.assertion.template.file = null
  sasl.oauthbearer.client.credentials.client.id = null
  sasl.oauthbearer.client.credentials.client.secret = null
  sasl.oauthbearer.clock.skew.seconds = 30
  sasl.oauthbearer.expected.audience = null
  sasl.oauthbearer.expected.issuer = null
  sasl.oauthbearer.header.urlencode = false
  sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
  sasl.oauthbearer.jwks.endpoint.url = null
  sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
  sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
  sasl.oauthbearer.scope = null
  sasl.oauthbearer.scope.claim.name = scope
  sasl.oauthbearer.sub.claim.name = sub
  sasl.oauthbearer.token.endpoint.url = null
  security.protocol = PLAINTEXT
  security.providers = null
  send.buffer.bytes = 131072
  socket.connection.setup.timeout.max.ms = 30000
  socket.connection.setup.timeout.ms = 10000
  ssl.cipher.suites = null
  ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
  ssl.endpoint.identification.algorithm = https
  ssl.engine.factory.class = null
  ssl.key.password = null
  ssl.keymanager.algorithm = SunX509
  ssl.keystore.certificate.chain = null
  ssl.keystore.key = null
  ssl.keystore.location = null
  ssl.keystore.password = null
  ssl.keystore.type = JKS
  ssl.protocol = TLSv1.3
  ssl.provider = null
  ssl.secure.random.implementation = null
  ssl.trustmanager.algorithm = PKIX
  ssl.truststore.certificates = null
  ssl.truststore.location = null
  ssl.truststore.password = null
  ssl.truststore.type = JKS
  transaction.timeout.ms = 60000
  transaction.two.phase.commit.enable = false
  transactional.id = null
  value.serializer = class org.apache.kafka.common.serialization.StringSerializer

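The dump above corresponds to a plain idempotent producer with String serializers against the embedded broker on localhost:6001. A minimal sketch of the kind of send the test presumably performs (the actual KafkaTest sources in ox's kafka module may differ; topic and payload here are assumptions):

    import java.util.Properties
    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
    import org.apache.kafka.common.serialization.StringSerializer

    // Sketch only: a producer equivalent to the config dumped above. Idempotence
    // is the client default in Kafka 4.x, so only the non-defaults are set.
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000")
    val producer = new KafkaProducer[String, String](props, new StringSerializer, new StringSerializer)
    try producer.send(new ProducerRecord("t1", "key", "value")).get() // block until acked
    finally producer.close()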
15:20:55.373 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:20:55.380 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-1] Instantiated an idempotent producer.
15:20:55.395 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:20:55.395 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:20:55.395 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314055394
15:20:55.410 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t1) to the active controller.
15:20:55.416 [kafka-producer-network-thread | producer-1] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-1] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t1=UNKNOWN_TOPIC_OR_PARTITION}
15:20:55.417 [kafka-producer-network-thread | producer-1] INFO o.a.k.c.Metadata - [Producer clientId=producer-1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:20:55.426 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
15:20:55.427 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t1 with topic ID 1J9IgnO9Sx2R04YbZwKGbQ.
15:20:55.429 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t1-0 with topic ID 1J9IgnO9Sx2R04YbZwKGbQ and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
15:20:55.438 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
15:20:55.439 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t1-0)
15:20:55.441 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t1-0 with topic id 1J9IgnO9Sx2R04YbZwKGbQ.
15:20:55.445 [quorum-controller-0-event-handler] INFO o.a.k.c.ProducerIdControlManager - [QuorumController id=0] Replaying ProducerIdsRecord ProducerIdsRecord(brokerId=0, brokerEpoch=5, nextProducerId=1000)
15:20:55.453 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t1-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
15:20:55.454 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t1-0 in /tmp/kafka-logs5982689497894266552/t1-0 with properties {}
15:20:55.455 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t1-0 broker=0] No checkpointed highwatermark is found for partition t1-0
15:20:55.457 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t1-0 broker=0] Log loaded for partition t1-0 with initial high watermark 0
15:20:55.461 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t1-0 with topic id Some(1J9IgnO9Sx2R04YbZwKGbQ) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas []. Previous leader None and previous leader epoch was -1.
15:20:56.425 [kafka-producer-network-thread | producer-1] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-1] ProducerId set to 0 with epoch 0
15:20:56.458 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-1] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:20:56.464 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:20:56.464 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:20:56.464 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:20:56.464 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:20:56.465 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-1 unregistered
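The UNKNOWN_TOPIC_OR_PARTITION warning at 15:20:55.416 above is a benign race, not one of this build's 231 errors: the producer's first metadata request arrives before the auto-creation of t1 (auto.create.topics.enable = true) has been replayed, and the client simply retries. If the warning were unwanted, the topic could be created up front with the standard Admin client; a minimal sketch (not part of this build):

    import java.util.Properties
    import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}
    import scala.jdk.CollectionConverters.*

    // Sketch only: pre-create t1 so the first produce does not race auto-creation.
    val props = new Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    val admin = Admin.create(props)
    try admin.createTopics(List(new NewTopic("t1", 1, 1.toShort)).asJava).all().get()
    finally admin.close()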
15:20:56.466 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
  (dump elided: identical to the producer-1 configuration above, except client.id = producer-2)

15:20:56.466 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:20:56.467 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-2] Instantiated an idempotent producer.
15:20:56.471 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:20:56.471 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:20:56.471 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314056471
15:20:56.476 [kafka-producer-network-thread | producer-2] INFO o.a.k.c.Metadata - [Producer clientId=producer-2] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:20:56.477 [kafka-producer-network-thread | producer-2] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-2] ProducerId set to 1 with epoch 0
15:20:56.488 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-2] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:20:56.492 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:20:56.492 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:20:56.493 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:20:56.493 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:20:56.493 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-2 unregistered
15:20:56.494 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
  (dump elided: identical to the producer-1 configuration above, except client.id = producer-3)

15:20:56.494 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:20:56.495 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-3] Instantiated an idempotent producer.
15:20:56.499 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:20:56.499 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:20:56.499 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314056499
15:20:56.504 [kafka-producer-network-thread | producer-3] INFO o.a.k.c.Metadata - [Producer clientId=producer-3] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:20:56.505 [kafka-producer-network-thread | producer-3] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-3] ProducerId set to 2 with epoch 0
15:20:56.515 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-3] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:20:56.518 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:20:56.518 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:20:56.518 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:20:56.518 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:20:56.518 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-3 unregistered
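The ConsumerConfig dump that follows (the [virtual-594] thread name suggests the consumer runs on a virtual thread, presumably inside an ox concurrency scope) describes a classic-protocol consumer in group g1 with earliest offset reset and manual commits. A minimal stand-alone sketch of an equivalent consumer using the plain Kafka client (the test itself presumably drives this through ox's Kafka integration rather than a raw poll loop; that is an assumption):

    import java.time.Duration
    import java.util.Properties
    import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
    import org.apache.kafka.common.serialization.StringDeserializer

    // Sketch only: a consumer matching the config dumped below. With
    // enable.auto.commit = false, offsets must be committed explicitly.
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "g1")
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
    val consumer = new KafkaConsumer[String, String](props, new StringDeserializer, new StringDeserializer)
    try
      consumer.subscribe(java.util.List.of("t1"))
      val records = consumer.poll(Duration.ofSeconds(1))
      records.forEach(r => println(s"${r.key} -> ${r.value}"))
      consumer.commitSync() // manual commit, matching enable.auto.commit = false
    finally consumer.close()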
15:20:56.535 [virtual-594] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
  allow.auto.create.topics = true
  auto.commit.interval.ms = 5000
  auto.offset.reset = earliest
  bootstrap.servers = [localhost:6001]
  check.crcs = true
  client.dns.lookup = use_all_dns_ips
  client.id = consumer-g1-1
  client.rack =
  connections.max.idle.ms = 540000
  default.api.timeout.ms = 60000
  enable.auto.commit = false
  enable.metrics.push = true
  exclude.internal.topics = true
  fetch.max.bytes = 52428800
  fetch.max.wait.ms = 500
  fetch.min.bytes = 1
  group.id = g1
  group.instance.id = null
  group.protocol = classic
  group.remote.assignor = null
  heartbeat.interval.ms = 3000
  interceptor.classes = []
  internal.leave.group.on.close = true
  internal.throw.on.fetch.stable.offset.unsupported = false
  isolation.level = read_uncommitted
  key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
  max.partition.fetch.bytes = 1048576
  max.poll.interval.ms = 300000
  max.poll.records = 500
  metadata.max.age.ms = 300000
  metadata.recovery.rebootstrap.trigger.ms = 300000
  metadata.recovery.strategy = rebootstrap
  metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
  metrics.num.samples = 2
  metrics.recording.level = INFO
  metrics.sample.window.ms = 30000
  partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
  receive.buffer.bytes = 65536
  reconnect.backoff.max.ms = 1000
  reconnect.backoff.ms = 50
  request.timeout.ms = 30000
  retry.backoff.max.ms = 1000
  retry.backoff.ms = 100
  sasl.client.callback.handler.class = null
  sasl.jaas.config = null
  sasl.kerberos.kinit.cmd = /usr/bin/kinit
  sasl.kerberos.min.time.before.relogin = 60000
  sasl.kerberos.service.name = null
  sasl.kerberos.ticket.renew.jitter = 0.05
  sasl.kerberos.ticket.renew.window.factor = 0.8
  sasl.login.callback.handler.class = null
  sasl.login.class = null
  sasl.login.connect.timeout.ms = null
  sasl.login.read.timeout.ms = null
  sasl.login.refresh.buffer.seconds = 300
  sasl.login.refresh.min.period.seconds = 60
  sasl.login.refresh.window.factor = 0.8
  sasl.login.refresh.window.jitter = 0.05
  sasl.login.retry.backoff.max.ms = 10000
  sasl.login.retry.backoff.ms = 100
  sasl.mechanism = GSSAPI
  sasl.oauthbearer.assertion.algorithm = RS256
  sasl.oauthbearer.assertion.claim.aud = null
  sasl.oauthbearer.assertion.claim.exp.seconds = 300
  sasl.oauthbearer.assertion.claim.iss = null
  sasl.oauthbearer.assertion.claim.jti.include = false
  sasl.oauthbearer.assertion.claim.nbf.seconds = 60
  sasl.oauthbearer.assertion.claim.sub = null
  sasl.oauthbearer.assertion.file = null
  sasl.oauthbearer.assertion.private.key.file = null
  sasl.oauthbearer.assertion.private.key.passphrase = null
  sasl.oauthbearer.assertion.template.file = null
  sasl.oauthbearer.client.credentials.client.id = null
  sasl.oauthbearer.client.credentials.client.secret = null
  sasl.oauthbearer.clock.skew.seconds = 30
  sasl.oauthbearer.expected.audience = null
  sasl.oauthbearer.expected.issuer = null
  sasl.oauthbearer.header.urlencode = false
  sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
  sasl.oauthbearer.jwks.endpoint.url = null
3878 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
3879 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
3880 sasl.oauthbearer.scope = null
3881 sasl.oauthbearer.scope.claim.name = scope
3882 sasl.oauthbearer.sub.claim.name = sub
3883 sasl.oauthbearer.token.endpoint.url = null
3884 security.protocol = PLAINTEXT
3885 security.providers = null
3886 send.buffer.bytes = 131072
3887 session.timeout.ms = 45000
3888 share.acknowledgement.mode = implicit
3889 socket.connection.setup.timeout.max.ms = 30000
3890 socket.connection.setup.timeout.ms = 10000
3891 ssl.cipher.suites = null
3892 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
3893 ssl.endpoint.identification.algorithm = https
3894 ssl.engine.factory.class = null
3895 ssl.key.password = null
3896 ssl.keymanager.algorithm = SunX509
3897 ssl.keystore.certificate.chain = null
3898 ssl.keystore.key = null
3899 ssl.keystore.location = null
3900 ssl.keystore.password = null
3901 ssl.keystore.type = JKS
3902 ssl.protocol = TLSv1.3
3903 ssl.provider = null
3904 ssl.secure.random.implementation = null
3905 ssl.trustmanager.algorithm = PKIX
3906 ssl.truststore.certificates = null
3907 ssl.truststore.location = null
3908 ssl.truststore.password = null
3909 ssl.truststore.type = JKS
3910 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
3911
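----
This ConsumerConfig (group.id = g1, auto.offset.reset = earliest, enable.auto.commit = false) is built on a virtual thread by the test's KafkaFlow subscription; it matches the "Subscribed to topic(s): t1" entry further below. A rough equivalent, with names taken from the ox-kafka documentation (the actual test code may differ):

  import ox.kafka.{ConsumerSettings, KafkaFlow}
  import ox.kafka.ConsumerSettings.AutoOffsetReset

  @main def subscribeSketch(): Unit =
    // mirrors the dump above: group g1, embedded broker, read from the beginning
    val settings = ConsumerSettings
      .default("g1")
      .bootstrapServers("localhost:6001")
      .autoOffsetReset(AutoOffsetReset.Earliest)
    // polls on an internal fork (a virtual thread) and emits received messages
    KafkaFlow.subscribe(settings, "t1").take(3).runForeach(msg => println(msg.value))
----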
3912 15:20:56.545 [virtual-594] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
3913 15:20:56.580 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
3914 15:20:56.580 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
3915 15:20:56.580 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314056580
3916 15:20:56.602 [virtual-601] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g1-1, groupId=g1] Subscribed to topic(s): t1
3917 15:20:56.609 [virtual-601] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g1-1, groupId=g1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
3918 15:20:56.610 [data-plane-kafka-request-handler-7] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(__consumer_offsets) to the active controller.
3919 15:20:56.614 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='__consumer_offsets', numPartitions=1, replicationFactor=1, assignments=[], configs=[CreatableTopicConfig(name='compression.type', value='producer'), CreatableTopicConfig(name='cleanup.policy', value='compact'), CreatableTopicConfig(name='segment.bytes', value='104857600')]): SUCCESS
3920 15:20:56.615 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic __consumer_offsets with topic ID z1FpZ7rqTseT7ZJlr_4QUQ.
3921 15:20:56.616 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration compression.type to producer
3922 15:20:56.616 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration cleanup.policy to compact
3923 15:20:56.616 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration segment.bytes to 104857600
3924 15:20:56.616 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition __consumer_offsets-0 with topic ID z1FpZ7rqTseT7ZJlr_4QUQ and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
3925 15:20:56.643 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
3926 15:20:56.643 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(__consumer_offsets-0)
3927 15:20:56.644 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition __consumer_offsets-0 with topic id z1FpZ7rqTseT7ZJlr_4QUQ.
3928 15:20:56.646 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__consumer_offsets-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
3929 15:20:56.647 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition __consumer_offsets-0 in /tmp/kafka-logs5982689497894266552/__consumer_offsets-0 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600}
3930 15:20:56.647 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition __consumer_offsets-0 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-0
3931 15:20:56.647 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition __consumer_offsets-0 broker=0] Log loaded for partition __consumer_offsets-0 with initial high watermark 0
3932 15:20:56.647 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader __consumer_offsets-0 with topic id Some(z1FpZ7rqTseT7ZJlr_4QUQ) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
3933 15:20:56.649 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Scheduling loading of metadata from __consumer_offsets-0 with epoch 0
3934 15:20:56.656 [kafka-0-metadata-loader-event-handler] INFO k.s.m.DynamicConfigPublisher - [DynamicConfigPublisher broker id=0] Updating topic __consumer_offsets with new configuration : compression.type -> producer,cleanup.policy -> compact,segment.bytes -> 104857600
3935 15:20:56.670 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Finished loading of metadata from __consumer_offsets-0 with epoch 0 in 1ms where 1ms was spent in the scheduler. Loaded 0 records which total to 0 bytes.
3936 15:20:56.710 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
3937 15:20:56.712 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] (Re-)joining group
3938 15:20:56.723 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g1 in Empty state. Created a new member id consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b and requesting the member to rejoin with this id.
3939 15:20:56.724 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Request joining group due to: need to re-join with the given member-id: consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b
3940 15:20:56.724 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] (Re-)joining group
3941 15:20:56.728 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b joins group g1 in Empty state. Adding to the group now.
3942 15:20:56.729 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b with group instance id null; client reason: need to re-join with the given member-id: consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b).
3943 15:20:59.730 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g1 generation 1 with 1 members.
3944 15:20:59.733 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b', protocol='range'}
3945 15:20:59.739 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Finished assignment for group at generation 1: {consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b=Assignment(partitions=[t1-0])}
3946 15:20:59.742 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b for group g1 for generation 1. The group has 1 members, 0 of which are static.
3947 15:20:59.751 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b', protocol='range'}
3948 15:20:59.751 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Notifying assignor about the new Assignment(partitions=[t1-0])
3949 15:20:59.753 [virtual-601] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g1-1, groupId=g1] Adding newly assigned partitions: [t1-0]
3950 15:20:59.760 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Found no committed offset for partition t1-0
3951 15:20:59.771 [virtual-601] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g1-1, groupId=g1] Resetting offset for partition t1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
3952 15:21:00.059 [virtual-594] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
3953 acks = -1
3954 batch.size = 16384
3955 bootstrap.servers = [localhost:6001]
3956 buffer.memory = 33554432
3957 client.dns.lookup = use_all_dns_ips
3958 client.id = producer-4
3959 compression.gzip.level = -1
3960 compression.lz4.level = 9
3961 compression.type = none
3962 compression.zstd.level = 3
3963 connections.max.idle.ms = 540000
3964 delivery.timeout.ms = 120000
3965 enable.idempotence = true
3966 enable.metrics.push = true
3967 interceptor.classes = []
3968 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
3969 linger.ms = 5
3970 max.block.ms = 10000
3971 max.in.flight.requests.per.connection = 5
3972 max.request.size = 1048576
3973 metadata.max.age.ms = 300000
3974 metadata.max.idle.ms = 300000
3975 metadata.recovery.rebootstrap.trigger.ms = 300000
3976 metadata.recovery.strategy = rebootstrap
3977 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3978 metrics.num.samples = 2
3979 metrics.recording.level = INFO
3980 metrics.sample.window.ms = 30000
3981 partitioner.adaptive.partitioning.enable = true
3982 partitioner.availability.timeout.ms = 0
3983 partitioner.class = null
3984 partitioner.ignore.keys = false
3985 receive.buffer.bytes = 32768
3986 reconnect.backoff.max.ms = 1000
3987 reconnect.backoff.ms = 50
3988 request.timeout.ms = 30000
3989 retries = 2147483647
3990 retry.backoff.max.ms = 1000
3991 retry.backoff.ms = 1000
3992 sasl.client.callback.handler.class = null
3993 sasl.jaas.config = null
3994 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3995 sasl.kerberos.min.time.before.relogin = 60000
3996 sasl.kerberos.service.name = null
3997 sasl.kerberos.ticket.renew.jitter = 0.05
3998 sasl.kerberos.ticket.renew.window.factor = 0.8
3999 sasl.login.callback.handler.class = null
4000 sasl.login.class = null
4001 sasl.login.connect.timeout.ms = null
4002 sasl.login.read.timeout.ms = null
4003 sasl.login.refresh.buffer.seconds = 300
4004 sasl.login.refresh.min.period.seconds = 60
4005 sasl.login.refresh.window.factor = 0.8
4006 sasl.login.refresh.window.jitter = 0.05
4007 sasl.login.retry.backoff.max.ms = 10000
4008 sasl.login.retry.backoff.ms = 100
4009 sasl.mechanism = GSSAPI
4010 sasl.oauthbearer.assertion.algorithm = RS256
4011 sasl.oauthbearer.assertion.claim.aud = null
4012 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4013 sasl.oauthbearer.assertion.claim.iss = null
4014 sasl.oauthbearer.assertion.claim.jti.include = false
4015 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4016 sasl.oauthbearer.assertion.claim.sub = null
4017 sasl.oauthbearer.assertion.file = null
4018 sasl.oauthbearer.assertion.private.key.file = null
4019 sasl.oauthbearer.assertion.private.key.passphrase = null
4020 sasl.oauthbearer.assertion.template.file = null
4021 sasl.oauthbearer.client.credentials.client.id = null
4022 sasl.oauthbearer.client.credentials.client.secret = null
4023 sasl.oauthbearer.clock.skew.seconds = 30
4024 sasl.oauthbearer.expected.audience = null
4025 sasl.oauthbearer.expected.issuer = null
4026 sasl.oauthbearer.header.urlencode = false
4027 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4028 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4029 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4030 sasl.oauthbearer.jwks.endpoint.url = null
4031 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4032 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4033 sasl.oauthbearer.scope = null
4034 sasl.oauthbearer.scope.claim.name = scope
4035 sasl.oauthbearer.sub.claim.name = sub
4036 sasl.oauthbearer.token.endpoint.url = null
4037 security.protocol = PLAINTEXT
4038 security.providers = null
4039 send.buffer.bytes = 131072
4040 socket.connection.setup.timeout.max.ms = 30000
4041 socket.connection.setup.timeout.ms = 10000
4042 ssl.cipher.suites = null
4043 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4044 ssl.endpoint.identification.algorithm = https
4045 ssl.engine.factory.class = null
4046 ssl.key.password = null
4047 ssl.keymanager.algorithm = SunX509
4048 ssl.keystore.certificate.chain = null
4049 ssl.keystore.key = null
4050 ssl.keystore.location = null
4051 ssl.keystore.password = null
4052 ssl.keystore.type = JKS
4053 ssl.protocol = TLSv1.3
4054 ssl.provider = null
4055 ssl.secure.random.implementation = null
4056 ssl.trustmanager.algorithm = PKIX
4057 ssl.truststore.certificates = null
4058 ssl.truststore.location = null
4059 ssl.truststore.password = null
4060 ssl.truststore.type = JKS
4061 transaction.timeout.ms = 60000
4062 transaction.two.phase.commit.enable = false
4063 transactional.id = null
4064 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4065
4066 15:21:00.059 [virtual-594] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
4067 15:21:00.060 [virtual-594] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-4] Instantiated an idempotent producer.
4068 15:21:00.062 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
4069 15:21:00.062 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
4070 15:21:00.062 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314060062
4071 15:21:00.067 [kafka-producer-network-thread | producer-4] INFO o.a.k.c.Metadata - [Producer clientId=producer-4] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
4072 15:21:00.069 [kafka-producer-network-thread | producer-4] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-4] ProducerId set to 3 with epoch 0
4073 15:21:00.082 [virtual-594] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-4] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
4074 15:21:00.088 [virtual-594] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
4075 15:21:00.088 [virtual-594] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
4076 15:21:00.089 [virtual-594] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
4077 15:21:00.090 [virtual-594] INFO o.a.k.c.m.Metrics - Metrics reporters closed
4078 15:21:00.091 [virtual-594] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-4 unregistered
4079 15:21:00.093 [virtual-600] ERROR o.k.KafkaFlow$ - Exception when polling for records
4080 java.lang.InterruptedException: null
4081 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
4082 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
4083 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
4084 at ox.channels.ActorRef.ask(actor.scala:64)
4085 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
4086 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
4087 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
4088 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
4089 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
4090 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
4091 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
4092 at scala.Function0.apply$mcV$sp(Function0.scala:45)
4093 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
4094 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
4095 15:21:00.095 [virtual-601] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
4096 java.lang.InterruptedException: null
4097 ... 18 common frames omitted
4098 Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
4099 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
4100 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
4101 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
4102 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
4103 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
4104 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
4105 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
4106 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
4107 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
4108 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
4109 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
4110 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
4111 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
4112 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
4113 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
4114 at scala.Function0.apply$mcV$sp(Function0.scala:45)
4115 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
4116 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
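----
The two ERROR entries above are not a broker failure: the test's supervision scope shuts down while the KafkaFlow consumer actor is still blocked in poll(), so its virtual threads are interrupted and the Kafka client rethrows the interrupt as InterruptException (see the frames through ox.fork and ox.internal.ThreadHerd). The same shutdown mechanics can be reproduced with ox alone; a minimal, illustrative sketch, not the project's test code:

  import ox.*

  @main def interruptionSketch(): Unit =
    supervised {
      // a daemon fork, analogous to the poll loop behind KafkaFlow.subscribe:
      // when the scope's main body completes, still-running daemon forks are interrupted
      fork {
        try Thread.sleep(10_000) // stands in for the blocking consumer.poll(...)
        catch
          case e: InterruptedException =>
            // corresponds to the "Exception when polling for records" entries above
            println(s"polling fork interrupted: $e")
      }
      Thread.sleep(100) // the scope body ends here; the fork above is then cancelled
    }
----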
4117 15:21:00.111 [virtual-607] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g1-1, groupId=g1] Revoke previously assigned partitions [t1-0]
4118 15:21:00.112 [virtual-607] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Member consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
4119 15:21:00.113 [virtual-607] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Resetting generation and member id due to: consumer pro-actively leaving the group
4120 15:21:00.113 [virtual-607] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Request joining group due to: consumer pro-actively leaving the group
4121 15:21:00.115 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g1] Member consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
4122 15:21:00.115 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g1-1-4a4189c5-1baa-4a90-b2cb-7ccfa42d4f1b) members.).
4123 15:21:00.116 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g1 with generation 2 is now empty.
4124 15:21:00.595 [virtual-607] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
4125 15:21:00.595 [virtual-607] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
4126 15:21:00.595 [virtual-607] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
4127 15:21:00.595 [virtual-607] INFO o.a.k.c.m.Metrics - Metrics reporters closed
4128 15:21:00.599 [virtual-607] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g1-1 unregistered
4129 15:21:00.611 [virtual-609] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4130 acks = -1
4131 batch.size = 16384
4132 bootstrap.servers = [localhost:6001]
4133 buffer.memory = 33554432
4134 client.dns.lookup = use_all_dns_ips
4135 client.id = producer-5
4136 compression.gzip.level = -1
4137 compression.lz4.level = 9
4138 compression.type = none
4139 compression.zstd.level = 3
4140 connections.max.idle.ms = 540000
4141 delivery.timeout.ms = 120000
4142 enable.idempotence = true
4143 enable.metrics.push = true
4144 interceptor.classes = []
4145 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4146 linger.ms = 5
4147 max.block.ms = 60000
4148 max.in.flight.requests.per.connection = 5
4149 max.request.size = 1048576
4150 metadata.max.age.ms = 300000
4151 metadata.max.idle.ms = 300000
4152 metadata.recovery.rebootstrap.trigger.ms = 300000
4153 metadata.recovery.strategy = rebootstrap
4154 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4155 metrics.num.samples = 2
4156 metrics.recording.level = INFO
4157 metrics.sample.window.ms = 30000
4158 partitioner.adaptive.partitioning.enable = true
4159 partitioner.availability.timeout.ms = 0
4160 partitioner.class = null
4161 partitioner.ignore.keys = false
4162 receive.buffer.bytes = 32768
4163 reconnect.backoff.max.ms = 1000
4164 reconnect.backoff.ms = 50
4165 request.timeout.ms = 30000
4166 retries = 2147483647
4167 retry.backoff.max.ms = 1000
4168 retry.backoff.ms = 100
4169 sasl.client.callback.handler.class = null
4170 sasl.jaas.config = null
4171 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4172 sasl.kerberos.min.time.before.relogin = 60000
4173 sasl.kerberos.service.name = null
4174 sasl.kerberos.ticket.renew.jitter = 0.05
4175 sasl.kerberos.ticket.renew.window.factor = 0.8
4176 sasl.login.callback.handler.class = null
4177 sasl.login.class = null
4178 sasl.login.connect.timeout.ms = null
4179 sasl.login.read.timeout.ms = null
4180 sasl.login.refresh.buffer.seconds = 300
4181 sasl.login.refresh.min.period.seconds = 60
4182 sasl.login.refresh.window.factor = 0.8
4183 sasl.login.refresh.window.jitter = 0.05
4184 sasl.login.retry.backoff.max.ms = 10000
4185 sasl.login.retry.backoff.ms = 100
4186 sasl.mechanism = GSSAPI
4187 sasl.oauthbearer.assertion.algorithm = RS256
4188 sasl.oauthbearer.assertion.claim.aud = null
4189 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4190 sasl.oauthbearer.assertion.claim.iss = null
4191 sasl.oauthbearer.assertion.claim.jti.include = false
4192 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4193 sasl.oauthbearer.assertion.claim.sub = null
4194 sasl.oauthbearer.assertion.file = null
4195 sasl.oauthbearer.assertion.private.key.file = null
4196 sasl.oauthbearer.assertion.private.key.passphrase = null
4197 sasl.oauthbearer.assertion.template.file = null
4198 sasl.oauthbearer.client.credentials.client.id = null
4199 sasl.oauthbearer.client.credentials.client.secret = null
4200 sasl.oauthbearer.clock.skew.seconds = 30
4201 sasl.oauthbearer.expected.audience = null
4202 sasl.oauthbearer.expected.issuer = null
4203 sasl.oauthbearer.header.urlencode = false
4204 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4205 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4206 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4207 sasl.oauthbearer.jwks.endpoint.url = null
4208 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4209 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4210 sasl.oauthbearer.scope = null
4211 sasl.oauthbearer.scope.claim.name = scope
4212 sasl.oauthbearer.sub.claim.name = sub
4213 sasl.oauthbearer.token.endpoint.url = null
4214 security.protocol = PLAINTEXT
4215 security.providers = null
4216 send.buffer.bytes = 131072
4217 socket.connection.setup.timeout.max.ms = 30000
4218 socket.connection.setup.timeout.ms = 10000
4219 ssl.cipher.suites = null
4220 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4221 ssl.endpoint.identification.algorithm = https
4222 ssl.engine.factory.class = null
4223 ssl.key.password = null
4224 ssl.keymanager.algorithm = SunX509
4225 ssl.keystore.certificate.chain = null
4226 ssl.keystore.key = null
4227 ssl.keystore.location = null
4228 ssl.keystore.password = null
4229 ssl.keystore.type = JKS
4230 ssl.protocol = TLSv1.3
4231 ssl.provider = null
4232 ssl.secure.random.implementation = null
4233 ssl.trustmanager.algorithm = PKIX
4234 ssl.truststore.certificates = null
4235 ssl.truststore.location = null
4236 ssl.truststore.password = null
4237 ssl.truststore.type = JKS
4238 transaction.timeout.ms = 60000
4239 transaction.two.phase.commit.enable = false
4240 transactional.id = null
4241 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4242
4243 15:21:00.611 [virtual-609] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
4244 15:21:00.611 [virtual-609] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-5] Instantiated an idempotent producer.
4245 15:21:00.614 [virtual-609] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
4246 15:21:00.614 [virtual-609] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
4247 15:21:00.614 [virtual-609] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314060614
4248 15:21:00.619 [kafka-producer-network-thread | producer-5] INFO o.a.k.c.Metadata - [Producer clientId=producer-5] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
4249 15:21:00.620 [kafka-producer-network-thread | producer-5] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-5] ProducerId set to 4 with epoch 0
4250 15:21:00.636 [data-plane-kafka-request-handler-4] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t2) to the active controller.
4251 15:21:00.637 [kafka-producer-network-thread | producer-5] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-5] The metadata response from the cluster reported a recoverable issue with correlation id 5 : {t2=UNKNOWN_TOPIC_OR_PARTITION}
4252 15:21:00.638 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
4253 15:21:00.638 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t2 with topic ID AvGLvUHIRcyhRUcQeZqz_g.
4254 15:21:00.638 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t2-0 with topic ID AvGLvUHIRcyhRUcQeZqz_g and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
4255 15:21:00.665 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
4256 15:21:00.665 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t2-0)
4257 15:21:00.665 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t2-0 with topic id AvGLvUHIRcyhRUcQeZqz_g.
4258 15:21:00.667 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t2-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
4259 15:21:00.668 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t2-0 in /tmp/kafka-logs5982689497894266552/t2-0 with properties {}
4260 15:21:00.668 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t2-0 broker=0] No checkpointed highwatermark is found for partition t2-0
4261 15:21:00.668 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t2-0 broker=0] Log loaded for partition t2-0 with initial high watermark 0
4262 15:21:00.670 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t2-0 with topic id Some(AvGLvUHIRcyhRUcQeZqz_g) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
4263 15:21:00.857 [virtual-613] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-5] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
4264 15:21:00.860 [virtual-613] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
4265 15:21:00.860 [virtual-613] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
4266 15:21:00.861 [virtual-613] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
4267 15:21:00.861 [virtual-613] INFO o.a.k.c.m.Metrics - Metrics reporters closed
4268 15:21:00.861 [virtual-613] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-5 unregistered
4269 15:21:00.865 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
4270 allow.auto.create.topics = true
4271 auto.commit.interval.ms = 5000
4272 auto.offset.reset = earliest
4273 bootstrap.servers = [localhost:6001]
4274 check.crcs = true
4275 client.dns.lookup = use_all_dns_ips
4276 client.id = consumer-embedded-kafka-spec-2
4277 client.rack =
4278 connections.max.idle.ms = 540000
4279 default.api.timeout.ms = 60000
4280 enable.auto.commit = false
4281 enable.metrics.push = true
4282 exclude.internal.topics = true
4283 fetch.max.bytes = 52428800
4284 fetch.max.wait.ms = 500
4285 fetch.min.bytes = 1
4286 group.id = embedded-kafka-spec
4287 group.instance.id = null
4288 group.protocol = classic
4289 group.remote.assignor = null
4290 heartbeat.interval.ms = 3000
4291 interceptor.classes = []
4292 internal.leave.group.on.close = true
4293 internal.throw.on.fetch.stable.offset.unsupported = false
4294 isolation.level = read_uncommitted
4295 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4296 max.partition.fetch.bytes = 1048576
4297 max.poll.interval.ms = 300000
4298 max.poll.records = 500
4299 metadata.max.age.ms = 300000
4300 metadata.recovery.rebootstrap.trigger.ms = 300000
4301 metadata.recovery.strategy = rebootstrap
4302 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4303 metrics.num.samples = 2
4304 metrics.recording.level = INFO
4305 metrics.sample.window.ms = 30000
4306 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
4307 receive.buffer.bytes = 65536
4308 reconnect.backoff.max.ms = 1000
4309 reconnect.backoff.ms = 50
4310 request.timeout.ms = 30000
4311 retry.backoff.max.ms = 1000
4312 retry.backoff.ms = 100
4313 sasl.client.callback.handler.class = null
4314 sasl.jaas.config = null
4315 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4316 sasl.kerberos.min.time.before.relogin = 60000
4317 sasl.kerberos.service.name = null
4318 sasl.kerberos.ticket.renew.jitter = 0.05
4319 sasl.kerberos.ticket.renew.window.factor = 0.8
4320 sasl.login.callback.handler.class = null
4321 sasl.login.class = null
4322 sasl.login.connect.timeout.ms = null
4323 sasl.login.read.timeout.ms = null
4324 sasl.login.refresh.buffer.seconds = 300
4325 sasl.login.refresh.min.period.seconds = 60
4326 sasl.login.refresh.window.factor = 0.8
4327 sasl.login.refresh.window.jitter = 0.05
4328 sasl.login.retry.backoff.max.ms = 10000
4329 sasl.login.retry.backoff.ms = 100
4330 sasl.mechanism = GSSAPI
4331 sasl.oauthbearer.assertion.algorithm = RS256
4332 sasl.oauthbearer.assertion.claim.aud = null
4333 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4334 sasl.oauthbearer.assertion.claim.iss = null
4335 sasl.oauthbearer.assertion.claim.jti.include = false
4336 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4337 sasl.oauthbearer.assertion.claim.sub = null
4338 sasl.oauthbearer.assertion.file = null
4339 sasl.oauthbearer.assertion.private.key.file = null
4340 sasl.oauthbearer.assertion.private.key.passphrase = null
4341 sasl.oauthbearer.assertion.template.file = null
4342 sasl.oauthbearer.client.credentials.client.id = null
4343 sasl.oauthbearer.client.credentials.client.secret = null
4344 sasl.oauthbearer.clock.skew.seconds = 30
4345 sasl.oauthbearer.expected.audience = null
4346 sasl.oauthbearer.expected.issuer = null
4347 sasl.oauthbearer.header.urlencode = false
4348 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4349 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4350 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4351 sasl.oauthbearer.jwks.endpoint.url = null
4352 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4353 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4354 sasl.oauthbearer.scope = null
4355 sasl.oauthbearer.scope.claim.name = scope
4356 sasl.oauthbearer.sub.claim.name = sub
4357 sasl.oauthbearer.token.endpoint.url = null
4358 security.protocol = PLAINTEXT
4359 security.providers = null
4360 send.buffer.bytes = 131072
4361 session.timeout.ms = 45000
4362 share.acknowledgement.mode = implicit
4363 socket.connection.setup.timeout.max.ms = 30000
4364 socket.connection.setup.timeout.ms = 10000
4365 ssl.cipher.suites = null
4366 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4367 ssl.endpoint.identification.algorithm = https
4368 ssl.engine.factory.class = null
4369 ssl.key.password = null
4370 ssl.keymanager.algorithm = SunX509
4371 ssl.keystore.certificate.chain = null
4372 ssl.keystore.key = null
4373 ssl.keystore.location = null
4374 ssl.keystore.password = null
4375 ssl.keystore.type = JKS
4376 ssl.protocol = TLSv1.3
4377 ssl.provider = null
4378 ssl.secure.random.implementation = null
4379 ssl.trustmanager.algorithm = PKIX
4380 ssl.truststore.certificates = null
4381 ssl.truststore.location = null
4382 ssl.truststore.password = null
4383 ssl.truststore.type = JKS
4384 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4385
4386 15:21:00.865 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
4387 15:21:00.868 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
4388 15:21:00.868 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
4389 15:21:00.868 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314060868
4390 15:21:00.869 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Subscribed to topic(s): t2
4391 15:21:00.873 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
4392 15:21:00.878 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
4393 15:21:00.879 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] (Re-)joining group
4394 15:21:00.882 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group embedded-kafka-spec in Empty state. Created a new member id consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb and requesting the member to rejoin with this id.
4395 15:21:00.883 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Request joining group due to: need to re-join with the given member-id: consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb
4396 15:21:00.883 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] (Re-)joining group
4397 15:21:00.884 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb joins group embedded-kafka-spec in Empty state. Adding to the group now.
4398 15:21:00.884 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb with group instance id null; client reason: need to re-join with the given member-id: consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb).
4399 15:21:03.884 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group embedded-kafka-spec generation 1 with 1 members.
4400 15:21:03.885 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Successfully joined group with generation Generation{generationId=1, memberId='consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb', protocol='range'}
4401 15:21:03.885 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Finished assignment for group at generation 1: {consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb=Assignment(partitions=[t2-0])}
4402 15:21:03.886 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb for group embedded-kafka-spec for generation 1. The group has 1 members, 0 of which are static.
4403 15:21:03.893 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Successfully synced group in generation Generation{generationId=1, memberId='consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb', protocol='range'}
4404 15:21:03.894 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Notifying assignor about the new Assignment(partitions=[t2-0])
4405 15:21:03.894 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Adding newly assigned partitions: [t2-0]
4406 15:21:03.895 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Found no committed offset for partition t2-0
4407 15:21:03.897 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Resetting offset for partition t2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
4408 15:21:10.365 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Revoke previously assigned partitions [t2-0]
4409 15:21:10.365 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Member consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
4410 15:21:10.365 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Resetting generation and member id due to: consumer pro-actively leaving the group
4411 15:21:10.365 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Request joining group due to: consumer pro-actively leaving the group
4412 15:21:10.365 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group embedded-kafka-spec] Member consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
4413 15:21:10.366 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-embedded-kafka-spec-2-22b5a5e9-88fe-4ef2-bde7-55c0ea3b91bb) members.).
4414 15:21:10.366 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group embedded-kafka-spec with generation 2 is now empty.
4415 15:21:10.373 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
4416 15:21:10.373 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
4417 15:21:10.373 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
4418 15:21:10.373 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
4419 15:21:10.376 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-embedded-kafka-spec-2 unregistered
4420 15:21:10.379 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4421 acks = -1
4422 batch.size = 16384
4423 bootstrap.servers = [localhost:6001]
4424 buffer.memory = 33554432
4425 client.dns.lookup = use_all_dns_ips
4426 client.id = producer-6
4427 compression.gzip.level = -1
4428 compression.lz4.level = 9
4429 compression.type = none
4430 compression.zstd.level = 3
4431 connections.max.idle.ms = 540000
4432 delivery.timeout.ms = 120000
4433 enable.idempotence = true
4434 enable.metrics.push = true
4435 interceptor.classes = []
4436 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4437 linger.ms = 5
4438 max.block.ms = 10000
4439 max.in.flight.requests.per.connection = 5
4440 max.request.size = 1048576
4441 metadata.max.age.ms = 300000
4442 metadata.max.idle.ms = 300000
4443 metadata.recovery.rebootstrap.trigger.ms = 300000
4444 metadata.recovery.strategy = rebootstrap
4445 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4446 metrics.num.samples = 2
4447 metrics.recording.level = INFO
4448 metrics.sample.window.ms = 30000
4449 partitioner.adaptive.partitioning.enable = true
4450 partitioner.availability.timeout.ms = 0
4451 partitioner.class = null
4452 partitioner.ignore.keys = false
4453 receive.buffer.bytes = 32768
4454 reconnect.backoff.max.ms = 1000
4455 reconnect.backoff.ms = 50
4456 request.timeout.ms = 30000
4457 retries = 2147483647
4458 retry.backoff.max.ms = 1000
4459 retry.backoff.ms = 1000
4460 sasl.client.callback.handler.class = null
4461 sasl.jaas.config = null
4462 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4463 sasl.kerberos.min.time.before.relogin = 60000
4464 sasl.kerberos.service.name = null
4465 sasl.kerberos.ticket.renew.jitter = 0.05
4466 sasl.kerberos.ticket.renew.window.factor = 0.8
4467 sasl.login.callback.handler.class = null
4468 sasl.login.class = null
4469 sasl.login.connect.timeout.ms = null
4470 sasl.login.read.timeout.ms = null
4471 sasl.login.refresh.buffer.seconds = 300
4472 sasl.login.refresh.min.period.seconds = 60
4473 sasl.login.refresh.window.factor = 0.8
4474 sasl.login.refresh.window.jitter = 0.05
4475 sasl.login.retry.backoff.max.ms = 10000
4476 sasl.login.retry.backoff.ms = 100
4477 sasl.mechanism = GSSAPI
4478 sasl.oauthbearer.assertion.algorithm = RS256
4479 sasl.oauthbearer.assertion.claim.aud = null
4480 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4481 sasl.oauthbearer.assertion.claim.iss = null
4482 sasl.oauthbearer.assertion.claim.jti.include = false
4483 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4484 sasl.oauthbearer.assertion.claim.sub = null
4485 sasl.oauthbearer.assertion.file = null
4486 sasl.oauthbearer.assertion.private.key.file = null
4487 sasl.oauthbearer.assertion.private.key.passphrase = null
4488 sasl.oauthbearer.assertion.template.file = null
4489 sasl.oauthbearer.client.credentials.client.id = null
4490 sasl.oauthbearer.client.credentials.client.secret = null
4491 sasl.oauthbearer.clock.skew.seconds = 30
4492 sasl.oauthbearer.expected.audience = null
4493 sasl.oauthbearer.expected.issuer = null
4494 sasl.oauthbearer.header.urlencode = false
4495 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4496 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4497 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4498 sasl.oauthbearer.jwks.endpoint.url = null
4499 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4500 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4501 sasl.oauthbearer.scope = null
4502 sasl.oauthbearer.scope.claim.name = scope
4503 sasl.oauthbearer.sub.claim.name = sub
4504 sasl.oauthbearer.token.endpoint.url = null
4505 security.protocol = PLAINTEXT
4506 security.providers = null
4507 send.buffer.bytes = 131072
4508 socket.connection.setup.timeout.max.ms = 30000
4509 socket.connection.setup.timeout.ms = 10000
4510 ssl.cipher.suites = null
4511 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4512 ssl.endpoint.identification.algorithm = https
4513 ssl.engine.factory.class = null
4514 ssl.key.password = null
4515 ssl.keymanager.algorithm = SunX509
4516 ssl.keystore.certificate.chain = null
4517 ssl.keystore.key = null
4518 ssl.keystore.location = null
4519 ssl.keystore.password = null
4520 ssl.keystore.type = JKS
4521 ssl.protocol = TLSv1.3
4522 ssl.provider = null
4523 ssl.secure.random.implementation = null
4524 ssl.trustmanager.algorithm = PKIX
4525 ssl.truststore.certificates = null
4526 ssl.truststore.location = null
4527 ssl.truststore.password = null
4528 ssl.truststore.type = JKS
4529 transaction.timeout.ms = 60000
4530 transaction.two.phase.commit.enable = false
4531 transactional.id = null
4532 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4533
453415:21:10.379 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
453515:21:10.380 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-6] Instantiated an idempotent producer.
453615:21:10.382 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
453715:21:10.382 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
453815:21:10.382 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314070382
453915:21:10.386 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t3_1) to the active controller.
454015:21:10.387 [kafka-producer-network-thread | producer-6] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-6] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t3_1=UNKNOWN_TOPIC_OR_PARTITION}
454115:21:10.387 [kafka-producer-network-thread | producer-6] INFO o.a.k.c.Metadata - [Producer clientId=producer-6] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
454215:21:10.387 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t3_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
454315:21:10.387 [kafka-producer-network-thread | producer-6] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-6] ProducerId set to 5 with epoch 0
454415:21:10.388 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t3_1 with topic ID DHhr9AvDSty-TnD91vsglA.
454515:21:10.388 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t3_1-0 with topic ID DHhr9AvDSty-TnD91vsglA and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
454615:21:10.414 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
454715:21:10.415 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t3_1-0)
454815:21:10.415 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t3_1-0 with topic id DHhr9AvDSty-TnD91vsglA.
454915:21:10.418 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t3_1-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
455015:21:10.418 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t3_1-0 in /tmp/kafka-logs5982689497894266552/t3_1-0 with properties {}
455115:21:10.418 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_1-0 broker=0] No checkpointed highwatermark is found for partition t3_1-0
455215:21:10.418 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_1-0 broker=0] Log loaded for partition t3_1-0 with initial high watermark 0
455315:21:10.419 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t3_1-0 with topic id Some(DHhr9AvDSty-TnD91vsglA) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
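----
The sequence above (the auto-creation request, the controller's CreateTopics SUCCESS, the replayed Topic and Partition records, and the leader election for t3_1-0) is the broker creating the topic on first use; the producer's earlier UNKNOWN_TOPIC_OR_PARTITION warning is just its metadata catching up with that. A minimal sketch of pre-creating such a topic explicitly with the Kafka AdminClient, pointed at the embedded broker from this log (illustrative code, not part of the test suite):

import java.util
import java.util.Properties
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}

val props = new Properties()
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
val admin = Admin.create(props)
// one partition, replication factor 1: matching CreatableTopic(name='t3_1', numPartitions=1, replicationFactor=1) above
try admin.createTopics(util.List.of(new NewTopic("t3_1", 1, 1.toShort))).all().get()
finally admin.close()
----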
455415:21:11.399 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-6] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
455515:21:11.401 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
455615:21:11.401 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
455715:21:11.401 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
455815:21:11.401 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
455915:21:11.402 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-6 unregistered
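----
The ProducerConfig blocks throughout this log are printed by Kafka's AbstractConfig whenever a client is constructed. Built directly with kafka-clients, a producer equivalent to the dump above looks roughly like this, keeping just a few of the values it shows (a sketch, not the wrapper the test actually goes through):

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
import org.apache.kafka.common.serialization.StringSerializer

val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001") // bootstrap.servers above
props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")          // enable.idempotence = true
props.put(ProducerConfig.LINGER_MS_CONFIG, "5")                      // linger.ms = 5
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000")               // max.block.ms = 10000
// serializers passed directly, matching the String key/value serializers in the dump
val producer = new KafkaProducer[String, String](props, new StringSerializer, new StringSerializer)
----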
456015:21:11.402 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4561 acks = -1
4562 batch.size = 16384
4563 bootstrap.servers = [localhost:6001]
4564 buffer.memory = 33554432
4565 client.dns.lookup = use_all_dns_ips
4566 client.id = producer-7
4567 compression.gzip.level = -1
4568 compression.lz4.level = 9
4569 compression.type = none
4570 compression.zstd.level = 3
4571 connections.max.idle.ms = 540000
4572 delivery.timeout.ms = 120000
4573 enable.idempotence = true
4574 enable.metrics.push = true
4575 interceptor.classes = []
4576 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4577 linger.ms = 5
4578 max.block.ms = 10000
4579 max.in.flight.requests.per.connection = 5
4580 max.request.size = 1048576
4581 metadata.max.age.ms = 300000
4582 metadata.max.idle.ms = 300000
4583 metadata.recovery.rebootstrap.trigger.ms = 300000
4584 metadata.recovery.strategy = rebootstrap
4585 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4586 metrics.num.samples = 2
4587 metrics.recording.level = INFO
4588 metrics.sample.window.ms = 30000
4589 partitioner.adaptive.partitioning.enable = true
4590 partitioner.availability.timeout.ms = 0
4591 partitioner.class = null
4592 partitioner.ignore.keys = false
4593 receive.buffer.bytes = 32768
4594 reconnect.backoff.max.ms = 1000
4595 reconnect.backoff.ms = 50
4596 request.timeout.ms = 30000
4597 retries = 2147483647
4598 retry.backoff.max.ms = 1000
4599 retry.backoff.ms = 1000
4600 sasl.client.callback.handler.class = null
4601 sasl.jaas.config = null
4602 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4603 sasl.kerberos.min.time.before.relogin = 60000
4604 sasl.kerberos.service.name = null
4605 sasl.kerberos.ticket.renew.jitter = 0.05
4606 sasl.kerberos.ticket.renew.window.factor = 0.8
4607 sasl.login.callback.handler.class = null
4608 sasl.login.class = null
4609 sasl.login.connect.timeout.ms = null
4610 sasl.login.read.timeout.ms = null
4611 sasl.login.refresh.buffer.seconds = 300
4612 sasl.login.refresh.min.period.seconds = 60
4613 sasl.login.refresh.window.factor = 0.8
4614 sasl.login.refresh.window.jitter = 0.05
4615 sasl.login.retry.backoff.max.ms = 10000
4616 sasl.login.retry.backoff.ms = 100
4617 sasl.mechanism = GSSAPI
4618 sasl.oauthbearer.assertion.algorithm = RS256
4619 sasl.oauthbearer.assertion.claim.aud = null
4620 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4621 sasl.oauthbearer.assertion.claim.iss = null
4622 sasl.oauthbearer.assertion.claim.jti.include = false
4623 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4624 sasl.oauthbearer.assertion.claim.sub = null
4625 sasl.oauthbearer.assertion.file = null
4626 sasl.oauthbearer.assertion.private.key.file = null
4627 sasl.oauthbearer.assertion.private.key.passphrase = null
4628 sasl.oauthbearer.assertion.template.file = null
4629 sasl.oauthbearer.client.credentials.client.id = null
4630 sasl.oauthbearer.client.credentials.client.secret = null
4631 sasl.oauthbearer.clock.skew.seconds = 30
4632 sasl.oauthbearer.expected.audience = null
4633 sasl.oauthbearer.expected.issuer = null
4634 sasl.oauthbearer.header.urlencode = false
4635 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4636 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4637 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4638 sasl.oauthbearer.jwks.endpoint.url = null
4639 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4640 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4641 sasl.oauthbearer.scope = null
4642 sasl.oauthbearer.scope.claim.name = scope
4643 sasl.oauthbearer.sub.claim.name = sub
4644 sasl.oauthbearer.token.endpoint.url = null
4645 security.protocol = PLAINTEXT
4646 security.providers = null
4647 send.buffer.bytes = 131072
4648 socket.connection.setup.timeout.max.ms = 30000
4649 socket.connection.setup.timeout.ms = 10000
4650 ssl.cipher.suites = null
4651 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4652 ssl.endpoint.identification.algorithm = https
4653 ssl.engine.factory.class = null
4654 ssl.key.password = null
4655 ssl.keymanager.algorithm = SunX509
4656 ssl.keystore.certificate.chain = null
4657 ssl.keystore.key = null
4658 ssl.keystore.location = null
4659 ssl.keystore.password = null
4660 ssl.keystore.type = JKS
4661 ssl.protocol = TLSv1.3
4662 ssl.provider = null
4663 ssl.secure.random.implementation = null
4664 ssl.trustmanager.algorithm = PKIX
4665 ssl.truststore.certificates = null
4666 ssl.truststore.location = null
4667 ssl.truststore.password = null
4668 ssl.truststore.type = JKS
4669 transaction.timeout.ms = 60000
4670 transaction.two.phase.commit.enable = false
4671 transactional.id = null
4672 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4673
467415:21:11.403 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
467515:21:11.403 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-7] Instantiated an idempotent producer.
467615:21:11.405 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
467715:21:11.405 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
467815:21:11.405 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314071405
467915:21:11.409 [kafka-producer-network-thread | producer-7] INFO o.a.k.c.Metadata - [Producer clientId=producer-7] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
468015:21:11.409 [kafka-producer-network-thread | producer-7] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-7] ProducerId set to 6 with epoch 0
468115:21:11.419 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-7] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
468215:21:11.422 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
468315:21:11.422 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
468415:21:11.422 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
468515:21:11.422 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
468615:21:11.422 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-7 unregistered
468715:21:11.423 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4688 acks = -1
4689 batch.size = 16384
4690 bootstrap.servers = [localhost:6001]
4691 buffer.memory = 33554432
4692 client.dns.lookup = use_all_dns_ips
4693 client.id = producer-8
4694 compression.gzip.level = -1
4695 compression.lz4.level = 9
4696 compression.type = none
4697 compression.zstd.level = 3
4698 connections.max.idle.ms = 540000
4699 delivery.timeout.ms = 120000
4700 enable.idempotence = true
4701 enable.metrics.push = true
4702 interceptor.classes = []
4703 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4704 linger.ms = 5
4705 max.block.ms = 10000
4706 max.in.flight.requests.per.connection = 5
4707 max.request.size = 1048576
4708 metadata.max.age.ms = 300000
4709 metadata.max.idle.ms = 300000
4710 metadata.recovery.rebootstrap.trigger.ms = 300000
4711 metadata.recovery.strategy = rebootstrap
4712 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4713 metrics.num.samples = 2
4714 metrics.recording.level = INFO
4715 metrics.sample.window.ms = 30000
4716 partitioner.adaptive.partitioning.enable = true
4717 partitioner.availability.timeout.ms = 0
4718 partitioner.class = null
4719 partitioner.ignore.keys = false
4720 receive.buffer.bytes = 32768
4721 reconnect.backoff.max.ms = 1000
4722 reconnect.backoff.ms = 50
4723 request.timeout.ms = 30000
4724 retries = 2147483647
4725 retry.backoff.max.ms = 1000
4726 retry.backoff.ms = 1000
4727 sasl.client.callback.handler.class = null
4728 sasl.jaas.config = null
4729 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4730 sasl.kerberos.min.time.before.relogin = 60000
4731 sasl.kerberos.service.name = null
4732 sasl.kerberos.ticket.renew.jitter = 0.05
4733 sasl.kerberos.ticket.renew.window.factor = 0.8
4734 sasl.login.callback.handler.class = null
4735 sasl.login.class = null
4736 sasl.login.connect.timeout.ms = null
4737 sasl.login.read.timeout.ms = null
4738 sasl.login.refresh.buffer.seconds = 300
4739 sasl.login.refresh.min.period.seconds = 60
4740 sasl.login.refresh.window.factor = 0.8
4741 sasl.login.refresh.window.jitter = 0.05
4742 sasl.login.retry.backoff.max.ms = 10000
4743 sasl.login.retry.backoff.ms = 100
4744 sasl.mechanism = GSSAPI
4745 sasl.oauthbearer.assertion.algorithm = RS256
4746 sasl.oauthbearer.assertion.claim.aud = null
4747 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4748 sasl.oauthbearer.assertion.claim.iss = null
4749 sasl.oauthbearer.assertion.claim.jti.include = false
4750 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4751 sasl.oauthbearer.assertion.claim.sub = null
4752 sasl.oauthbearer.assertion.file = null
4753 sasl.oauthbearer.assertion.private.key.file = null
4754 sasl.oauthbearer.assertion.private.key.passphrase = null
4755 sasl.oauthbearer.assertion.template.file = null
4756 sasl.oauthbearer.client.credentials.client.id = null
4757 sasl.oauthbearer.client.credentials.client.secret = null
4758 sasl.oauthbearer.clock.skew.seconds = 30
4759 sasl.oauthbearer.expected.audience = null
4760 sasl.oauthbearer.expected.issuer = null
4761 sasl.oauthbearer.header.urlencode = false
4762 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4763 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4764 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4765 sasl.oauthbearer.jwks.endpoint.url = null
4766 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4767 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4768 sasl.oauthbearer.scope = null
4769 sasl.oauthbearer.scope.claim.name = scope
4770 sasl.oauthbearer.sub.claim.name = sub
4771 sasl.oauthbearer.token.endpoint.url = null
4772 security.protocol = PLAINTEXT
4773 security.providers = null
4774 send.buffer.bytes = 131072
4775 socket.connection.setup.timeout.max.ms = 30000
4776 socket.connection.setup.timeout.ms = 10000
4777 ssl.cipher.suites = null
4778 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4779 ssl.endpoint.identification.algorithm = https
4780 ssl.engine.factory.class = null
4781 ssl.key.password = null
4782 ssl.keymanager.algorithm = SunX509
4783 ssl.keystore.certificate.chain = null
4784 ssl.keystore.key = null
4785 ssl.keystore.location = null
4786 ssl.keystore.password = null
4787 ssl.keystore.type = JKS
4788 ssl.protocol = TLSv1.3
4789 ssl.provider = null
4790 ssl.secure.random.implementation = null
4791 ssl.trustmanager.algorithm = PKIX
4792 ssl.truststore.certificates = null
4793 ssl.truststore.location = null
4794 ssl.truststore.password = null
4795 ssl.truststore.type = JKS
4796 transaction.timeout.ms = 60000
4797 transaction.two.phase.commit.enable = false
4798 transactional.id = null
4799 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4800
480115:21:11.423 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
480215:21:11.423 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-8] Instantiated an idempotent producer.
480315:21:11.425 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
480415:21:11.425 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
480515:21:11.426 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314071425
480615:21:11.428 [kafka-producer-network-thread | producer-8] INFO o.a.k.c.Metadata - [Producer clientId=producer-8] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
480715:21:11.429 [kafka-producer-network-thread | producer-8] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-8] ProducerId set to 7 with epoch 0
480815:21:11.438 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-8] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
480915:21:11.440 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
481015:21:11.440 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
481115:21:11.440 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
481215:21:11.440 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
481315:21:11.440 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-8 unregistered
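----
Producers 6 through 8 each live for well under a second: construction, a ProducerId grant from the coordinator (idempotence is on), then close. That timing is consistent with each test case publishing a handful of records, roughly as in this continuation of the producer sketch above (topic, key and value are placeholders):

import org.apache.kafka.clients.producer.ProducerRecord

// acks = -1 in the dumps means each send waits for all in-sync replicas
producer.send(new ProducerRecord[String, String]("t3_1", "key", "value")).get()
producer.close() // logged above as "Closing the Kafka producer with timeoutMillis = ..."
----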
481415:21:11.441 [virtual-618] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
4815 allow.auto.create.topics = true
4816 auto.commit.interval.ms = 5000
4817 auto.offset.reset = earliest
4818 bootstrap.servers = [localhost:6001]
4819 check.crcs = true
4820 client.dns.lookup = use_all_dns_ips
4821 client.id = consumer-g3_1-3
4822 client.rack =
4823 connections.max.idle.ms = 540000
4824 default.api.timeout.ms = 60000
4825 enable.auto.commit = false
4826 enable.metrics.push = true
4827 exclude.internal.topics = true
4828 fetch.max.bytes = 52428800
4829 fetch.max.wait.ms = 500
4830 fetch.min.bytes = 1
4831 group.id = g3_1
4832 group.instance.id = null
4833 group.protocol = classic
4834 group.remote.assignor = null
4835 heartbeat.interval.ms = 3000
4836 interceptor.classes = []
4837 internal.leave.group.on.close = true
4838 internal.throw.on.fetch.stable.offset.unsupported = false
4839 isolation.level = read_uncommitted
4840 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4841 max.partition.fetch.bytes = 1048576
4842 max.poll.interval.ms = 300000
4843 max.poll.records = 500
4844 metadata.max.age.ms = 300000
4845 metadata.recovery.rebootstrap.trigger.ms = 300000
4846 metadata.recovery.strategy = rebootstrap
4847 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4848 metrics.num.samples = 2
4849 metrics.recording.level = INFO
4850 metrics.sample.window.ms = 30000
4851 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
4852 receive.buffer.bytes = 65536
4853 reconnect.backoff.max.ms = 1000
4854 reconnect.backoff.ms = 50
4855 request.timeout.ms = 30000
4856 retry.backoff.max.ms = 1000
4857 retry.backoff.ms = 100
4858 sasl.client.callback.handler.class = null
4859 sasl.jaas.config = null
4860 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4861 sasl.kerberos.min.time.before.relogin = 60000
4862 sasl.kerberos.service.name = null
4863 sasl.kerberos.ticket.renew.jitter = 0.05
4864 sasl.kerberos.ticket.renew.window.factor = 0.8
4865 sasl.login.callback.handler.class = null
4866 sasl.login.class = null
4867 sasl.login.connect.timeout.ms = null
4868 sasl.login.read.timeout.ms = null
4869 sasl.login.refresh.buffer.seconds = 300
4870 sasl.login.refresh.min.period.seconds = 60
4871 sasl.login.refresh.window.factor = 0.8
4872 sasl.login.refresh.window.jitter = 0.05
4873 sasl.login.retry.backoff.max.ms = 10000
4874 sasl.login.retry.backoff.ms = 100
4875 sasl.mechanism = GSSAPI
4876 sasl.oauthbearer.assertion.algorithm = RS256
4877 sasl.oauthbearer.assertion.claim.aud = null
4878 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4879 sasl.oauthbearer.assertion.claim.iss = null
4880 sasl.oauthbearer.assertion.claim.jti.include = false
4881 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4882 sasl.oauthbearer.assertion.claim.sub = null
4883 sasl.oauthbearer.assertion.file = null
4884 sasl.oauthbearer.assertion.private.key.file = null
4885 sasl.oauthbearer.assertion.private.key.passphrase = null
4886 sasl.oauthbearer.assertion.template.file = null
4887 sasl.oauthbearer.client.credentials.client.id = null
4888 sasl.oauthbearer.client.credentials.client.secret = null
4889 sasl.oauthbearer.clock.skew.seconds = 30
4890 sasl.oauthbearer.expected.audience = null
4891 sasl.oauthbearer.expected.issuer = null
4892 sasl.oauthbearer.header.urlencode = false
4893 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4894 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4895 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4896 sasl.oauthbearer.jwks.endpoint.url = null
4897 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4898 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4899 sasl.oauthbearer.scope = null
4900 sasl.oauthbearer.scope.claim.name = scope
4901 sasl.oauthbearer.sub.claim.name = sub
4902 sasl.oauthbearer.token.endpoint.url = null
4903 security.protocol = PLAINTEXT
4904 security.providers = null
4905 send.buffer.bytes = 131072
4906 session.timeout.ms = 45000
4907 share.acknowledgement.mode = implicit
4908 socket.connection.setup.timeout.max.ms = 30000
4909 socket.connection.setup.timeout.ms = 10000
4910 ssl.cipher.suites = null
4911 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4912 ssl.endpoint.identification.algorithm = https
4913 ssl.engine.factory.class = null
4914 ssl.key.password = null
4915 ssl.keymanager.algorithm = SunX509
4916 ssl.keystore.certificate.chain = null
4917 ssl.keystore.key = null
4918 ssl.keystore.location = null
4919 ssl.keystore.password = null
4920 ssl.keystore.type = JKS
4921 ssl.protocol = TLSv1.3
4922 ssl.provider = null
4923 ssl.secure.random.implementation = null
4924 ssl.trustmanager.algorithm = PKIX
4925 ssl.truststore.certificates = null
4926 ssl.truststore.location = null
4927 ssl.truststore.password = null
4928 ssl.truststore.type = JKS
4929 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4930
493115:21:11.442 [virtual-620] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
4932 allow.auto.create.topics = true
4933 auto.commit.interval.ms = 5000
4934 auto.offset.reset = earliest
4935 bootstrap.servers = [localhost:6001]
4936 check.crcs = true
4937 client.dns.lookup = use_all_dns_ips
4938 client.id = consumer-g3_1-4
4939 client.rack =
4940 connections.max.idle.ms = 540000
4941 default.api.timeout.ms = 60000
4942 enable.auto.commit = false
4943 enable.metrics.push = true
4944 exclude.internal.topics = true
4945 fetch.max.bytes = 52428800
4946 fetch.max.wait.ms = 500
4947 fetch.min.bytes = 1
4948 group.id = g3_1
4949 group.instance.id = null
4950 group.protocol = classic
4951 group.remote.assignor = null
4952 heartbeat.interval.ms = 3000
4953 interceptor.classes = []
4954 internal.leave.group.on.close = true
4955 internal.throw.on.fetch.stable.offset.unsupported = false
4956 isolation.level = read_uncommitted
4957 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4958 max.partition.fetch.bytes = 1048576
4959 max.poll.interval.ms = 300000
4960 max.poll.records = 500
4961 metadata.max.age.ms = 300000
4962 metadata.recovery.rebootstrap.trigger.ms = 300000
4963 metadata.recovery.strategy = rebootstrap
4964 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4965 metrics.num.samples = 2
4966 metrics.recording.level = INFO
4967 metrics.sample.window.ms = 30000
4968 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
4969 receive.buffer.bytes = 65536
4970 reconnect.backoff.max.ms = 1000
4971 reconnect.backoff.ms = 50
4972 request.timeout.ms = 30000
4973 retry.backoff.max.ms = 1000
4974 retry.backoff.ms = 100
4975 sasl.client.callback.handler.class = null
4976 sasl.jaas.config = null
4977 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4978 sasl.kerberos.min.time.before.relogin = 60000
4979 sasl.kerberos.service.name = null
4980 sasl.kerberos.ticket.renew.jitter = 0.05
4981 sasl.kerberos.ticket.renew.window.factor = 0.8
4982 sasl.login.callback.handler.class = null
4983 sasl.login.class = null
4984 sasl.login.connect.timeout.ms = null
4985 sasl.login.read.timeout.ms = null
4986 sasl.login.refresh.buffer.seconds = 300
4987 sasl.login.refresh.min.period.seconds = 60
4988 sasl.login.refresh.window.factor = 0.8
4989 sasl.login.refresh.window.jitter = 0.05
4990 sasl.login.retry.backoff.max.ms = 10000
4991 sasl.login.retry.backoff.ms = 100
4992 sasl.mechanism = GSSAPI
4993 sasl.oauthbearer.assertion.algorithm = RS256
4994 sasl.oauthbearer.assertion.claim.aud = null
4995 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4996 sasl.oauthbearer.assertion.claim.iss = null
4997 sasl.oauthbearer.assertion.claim.jti.include = false
4998 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4999 sasl.oauthbearer.assertion.claim.sub = null
5000 sasl.oauthbearer.assertion.file = null
5001 sasl.oauthbearer.assertion.private.key.file = null
5002 sasl.oauthbearer.assertion.private.key.passphrase = null
5003 sasl.oauthbearer.assertion.template.file = null
5004 sasl.oauthbearer.client.credentials.client.id = null
5005 sasl.oauthbearer.client.credentials.client.secret = null
5006 sasl.oauthbearer.clock.skew.seconds = 30
5007 sasl.oauthbearer.expected.audience = null
5008 sasl.oauthbearer.expected.issuer = null
5009 sasl.oauthbearer.header.urlencode = false
5010 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5011 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5012 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5013 sasl.oauthbearer.jwks.endpoint.url = null
5014 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5015 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5016 sasl.oauthbearer.scope = null
5017 sasl.oauthbearer.scope.claim.name = scope
5018 sasl.oauthbearer.sub.claim.name = sub
5019 sasl.oauthbearer.token.endpoint.url = null
5020 security.protocol = PLAINTEXT
5021 security.providers = null
5022 send.buffer.bytes = 131072
5023 session.timeout.ms = 45000
5024 share.acknowledgement.mode = implicit
5025 socket.connection.setup.timeout.max.ms = 30000
5026 socket.connection.setup.timeout.ms = 10000
5027 ssl.cipher.suites = null
5028 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5029 ssl.endpoint.identification.algorithm = https
5030 ssl.engine.factory.class = null
5031 ssl.key.password = null
5032 ssl.keymanager.algorithm = SunX509
5033 ssl.keystore.certificate.chain = null
5034 ssl.keystore.key = null
5035 ssl.keystore.location = null
5036 ssl.keystore.password = null
5037 ssl.keystore.type = JKS
5038 ssl.protocol = TLSv1.3
5039 ssl.provider = null
5040 ssl.secure.random.implementation = null
5041 ssl.trustmanager.algorithm = PKIX
5042 ssl.truststore.certificates = null
5043 ssl.truststore.location = null
5044 ssl.truststore.password = null
5045 ssl.truststore.type = JKS
5046 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
5047
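----
The two ConsumerConfig dumps above describe consumer-g3_1-3 and consumer-g3_1-4 joining the same group g3_1 with the classic protocol, offsets reset to earliest, and auto-commit disabled. With plain kafka-clients the equivalent configuration looks roughly like this (a sketch of the dumped values, not the test's construction path):

import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer

val props = new Properties()
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001") // bootstrap.servers above
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g3_1")                    // group.id = g3_1
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")       // auto.offset.reset = earliest
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")         // enable.auto.commit = false
val consumer = new KafkaConsumer[String, String](props, new StringDeserializer, new StringDeserializer)
----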
504815:21:11.442 [virtual-618] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
504915:21:11.442 [virtual-620] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
505015:21:11.445 [virtual-618] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
505115:21:11.445 [virtual-618] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
505215:21:11.445 [virtual-618] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314071445
505315:21:11.445 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
505415:21:11.445 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
505515:21:11.446 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314071445
505615:21:11.446 [virtual-623] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Subscribed to topic(s): t3_2
505715:21:11.447 [virtual-620] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
5058 acks = -1
5059 batch.size = 16384
5060 bootstrap.servers = [localhost:6001]
5061 buffer.memory = 33554432
5062 client.dns.lookup = use_all_dns_ips
5063 client.id = producer-9
5064 compression.gzip.level = -1
5065 compression.lz4.level = 9
5066 compression.type = none
5067 compression.zstd.level = 3
5068 connections.max.idle.ms = 540000
5069 delivery.timeout.ms = 120000
5070 enable.idempotence = true
5071 enable.metrics.push = true
5072 interceptor.classes = []
5073 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
5074 linger.ms = 5
5075 max.block.ms = 60000
5076 max.in.flight.requests.per.connection = 5
5077 max.request.size = 1048576
5078 metadata.max.age.ms = 300000
5079 metadata.max.idle.ms = 300000
5080 metadata.recovery.rebootstrap.trigger.ms = 300000
5081 metadata.recovery.strategy = rebootstrap
5082 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
5083 metrics.num.samples = 2
5084 metrics.recording.level = INFO
5085 metrics.sample.window.ms = 30000
5086 partitioner.adaptive.partitioning.enable = true
5087 partitioner.availability.timeout.ms = 0
5088 partitioner.class = null
5089 partitioner.ignore.keys = false
5090 receive.buffer.bytes = 32768
5091 reconnect.backoff.max.ms = 1000
5092 reconnect.backoff.ms = 50
5093 request.timeout.ms = 30000
5094 retries = 2147483647
5095 retry.backoff.max.ms = 1000
5096 retry.backoff.ms = 100
5097 sasl.client.callback.handler.class = null
5098 sasl.jaas.config = null
5099 sasl.kerberos.kinit.cmd = /usr/bin/kinit
5100 sasl.kerberos.min.time.before.relogin = 60000
5101 sasl.kerberos.service.name = null
5102 sasl.kerberos.ticket.renew.jitter = 0.05
5103 sasl.kerberos.ticket.renew.window.factor = 0.8
5104 sasl.login.callback.handler.class = null
5105 sasl.login.class = null
5106 sasl.login.connect.timeout.ms = null
5107 sasl.login.read.timeout.ms = null
5108 sasl.login.refresh.buffer.seconds = 300
5109 sasl.login.refresh.min.period.seconds = 60
5110 sasl.login.refresh.window.factor = 0.8
5111 sasl.login.refresh.window.jitter = 0.05
5112 sasl.login.retry.backoff.max.ms = 10000
5113 sasl.login.retry.backoff.ms = 100
5114 sasl.mechanism = GSSAPI
5115 sasl.oauthbearer.assertion.algorithm = RS256
5116 sasl.oauthbearer.assertion.claim.aud = null
5117 sasl.oauthbearer.assertion.claim.exp.seconds = 300
5118 sasl.oauthbearer.assertion.claim.iss = null
5119 sasl.oauthbearer.assertion.claim.jti.include = false
5120 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5121 sasl.oauthbearer.assertion.claim.sub = null
5122 sasl.oauthbearer.assertion.file = null
5123 sasl.oauthbearer.assertion.private.key.file = null
5124 sasl.oauthbearer.assertion.private.key.passphrase = null
5125 sasl.oauthbearer.assertion.template.file = null
5126 sasl.oauthbearer.client.credentials.client.id = null
5127 sasl.oauthbearer.client.credentials.client.secret = null
5128 sasl.oauthbearer.clock.skew.seconds = 30
5129 sasl.oauthbearer.expected.audience = null
5130 sasl.oauthbearer.expected.issuer = null
5131 sasl.oauthbearer.header.urlencode = false
5132 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5133 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5134 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5135 sasl.oauthbearer.jwks.endpoint.url = null
5136 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5137 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5138 sasl.oauthbearer.scope = null
5139 sasl.oauthbearer.scope.claim.name = scope
5140 sasl.oauthbearer.sub.claim.name = sub
5141 sasl.oauthbearer.token.endpoint.url = null
5142 security.protocol = PLAINTEXT
5143 security.providers = null
5144 send.buffer.bytes = 131072
5145 socket.connection.setup.timeout.max.ms = 30000
5146 socket.connection.setup.timeout.ms = 10000
5147 ssl.cipher.suites = null
5148 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5149 ssl.endpoint.identification.algorithm = https
5150 ssl.engine.factory.class = null
5151 ssl.key.password = null
5152 ssl.keymanager.algorithm = SunX509
5153 ssl.keystore.certificate.chain = null
5154 ssl.keystore.key = null
5155 ssl.keystore.location = null
5156 ssl.keystore.password = null
5157 ssl.keystore.type = JKS
5158 ssl.protocol = TLSv1.3
5159 ssl.provider = null
5160 ssl.secure.random.implementation = null
5161 ssl.trustmanager.algorithm = PKIX
5162 ssl.truststore.certificates = null
5163 ssl.truststore.location = null
5164 ssl.truststore.password = null
5165 ssl.truststore.type = JKS
5166 transaction.timeout.ms = 60000
5167 transaction.two.phase.commit.enable = false
5168 transactional.id = null
5169 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
5170
517115:21:11.447 [virtual-620] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
517215:21:11.448 [virtual-620] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-9] Instantiated an idempotent producer.
517315:21:11.451 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
517415:21:11.451 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
517515:21:11.451 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314071451
517615:21:11.452 [virtual-624] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Subscribed to topic(s): t3_1
517715:21:11.454 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t3_2) to the active controller.
517815:21:11.457 [virtual-623] WARN o.a.k.c.NetworkClient - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] The metadata response from the cluster reported a recoverable issue with correlation id 2 : {t3_2=UNKNOWN_TOPIC_OR_PARTITION}
517915:21:11.458 [virtual-623] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
518015:21:11.459 [kafka-producer-network-thread | producer-9] INFO o.a.k.c.Metadata - [Producer clientId=producer-9] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
518115:21:11.460 [kafka-producer-network-thread | producer-9] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-9] ProducerId set to 8 with epoch 0
518215:21:11.460 [virtual-624] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
518315:21:11.461 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
518415:21:11.461 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t3_2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
518515:21:11.461 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t3_2 with topic ID x4-D65SMTiyVcAL5NJ-LGA.
518615:21:11.461 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
518715:21:11.462 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t3_2-0 with topic ID x4-D65SMTiyVcAL5NJ-LGA and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
518815:21:11.462 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] (Re-)joining group
518915:21:11.464 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] (Re-)joining group
519015:21:11.467 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in Empty state. Created a new member id consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3 and requesting the member to rejoin with this id.
519115:21:11.467 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in Empty state. Created a new member id consumer-g3_1-4-dded5dd0-413a-4430-86c7-8c6065b9377f and requesting the member to rejoin with this id.
519215:21:11.467 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3
519315:21:11.468 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] (Re-)joining group
519415:21:11.468 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-4-dded5dd0-413a-4430-86c7-8c6065b9377f
519515:21:11.469 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] (Re-)joining group
519615:21:11.469 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3 joins group g3_1 in Empty state. Adding to the group now.
519715:21:11.469 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3 with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3).
519815:21:11.470 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-4-dded5dd0-413a-4430-86c7-8c6065b9377f joins group g3_1 in PreparingRebalance state. Adding to the group now.
519915:21:11.487 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
520015:21:11.488 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t3_2-0)
520115:21:11.488 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t3_2-0 with topic id x4-D65SMTiyVcAL5NJ-LGA.
520215:21:11.490 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t3_2-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
520315:21:11.490 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t3_2-0 in /tmp/kafka-logs5982689497894266552/t3_2-0 with properties {}
520415:21:11.491 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_2-0 broker=0] No checkpointed highwatermark is found for partition t3_2-0
520515:21:11.491 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_2-0 broker=0] Log loaded for partition t3_2-0 with initial high watermark 0
520615:21:11.491 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t3_2-0 with topic id Some(x4-D65SMTiyVcAL5NJ-LGA) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
520715:21:17.471 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_1 generation 1 with 2 members.
520815:21:17.471 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3', protocol='range'}
520915:21:17.472 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_1-4-dded5dd0-413a-4430-86c7-8c6065b9377f', protocol='range'}
521015:21:17.474 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Finished assignment for group at generation 1: {consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3=Assignment(partitions=[t3_2-0]), consumer-g3_1-4-dded5dd0-413a-4430-86c7-8c6065b9377f=Assignment(partitions=[t3_1-0])}
521115:21:17.474 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3 for group g3_1 for generation 1. The group has 2 members, 0 of which are static.
521215:21:17.481 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3', protocol='range'}
521315:21:17.481 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_1-4-dded5dd0-413a-4430-86c7-8c6065b9377f', protocol='range'}
521415:21:17.481 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_2-0])
521515:21:17.481 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_1-0])
521615:21:17.481 [virtual-624] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Adding newly assigned partitions: [t3_1-0]
521715:21:17.481 [virtual-623] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Adding newly assigned partitions: [t3_2-0]
521815:21:17.483 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Found no committed offset for partition t3_1-0
521915:21:17.483 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Found no committed offset for partition t3_2-0
522015:21:17.485 [virtual-623] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Resetting offset for partition t3_2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
522115:21:17.486 [virtual-624] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Resetting offset for partition t3_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
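----
The lines from the two "Subscribed to topic(s)" messages through the FetchPosition resets trace one full classic-protocol rebalance. Each consumer subscribed to a different topic (t3_2 and t3_1); their first JoinGroup requests are answered with freshly minted member ids and a request to rejoin; once both rejoin, the coordinator stabilizes generation 1; the leader (consumer-g3_1-3) computes the range assignment, giving each member its topic's single partition; the members sync, their rebalance listeners fire, and, since the group has no committed offsets, both positions reset to offset 0 per auto.offset.reset = earliest. The "Adding newly assigned partitions" and "Revoke previously assigned partitions" messages are ConsumerRebalanceListener callbacks; a do-nothing sketch, reusing the consumer built earlier:

import java.util
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
import org.apache.kafka.common.TopicPartition

val listener = new ConsumerRebalanceListener {
  def onPartitionsAssigned(parts: util.Collection[TopicPartition]): Unit =
    println(s"assigned: $parts") // "Adding newly assigned partitions" above
  def onPartitionsRevoked(parts: util.Collection[TopicPartition]): Unit =
    println(s"revoked: $parts")  // "Revoke previously assigned partitions" above
}
consumer.subscribe(util.List.of("t3_2"), listener)
----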
522215:21:19.501 [virtual-622] ERROR o.k.KafkaFlow$ - Exception when polling for records
5223java.lang.InterruptedException: null
5224 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
5225 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
5226 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
5227 at ox.channels.ActorRef.ask(actor.scala:64)
5228 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
5229 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
5230 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5231 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5232 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
5233 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
5234 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
5235 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5236 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5237 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
523815:21:19.502 [virtual-623] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
5239java.lang.InterruptedException: null
5240 ... 18 common frames omitted
5241Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
5242 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
5243 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
5244 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
5245 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
5246 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
5247 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
5248 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
5249 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
5250 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
5251 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
5252 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5253 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5254 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
5255 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
5256 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
5257 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5258 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5259 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
526015:21:19.502 [virtual-627] ERROR o.k.KafkaFlow$ - Exception when polling for records
5261java.lang.InterruptedException: null
5262 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
5263 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
5264 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
5265 at ox.channels.ActorRef.ask(actor.scala:64)
5266 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
5267 at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
5268 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5269 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5270 at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
5271 at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
5272 at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
5273 at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
5274 at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
5275 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5276 at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
5277 at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
5278 at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
5279 at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
5280 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5281 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5282 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
528315:21:19.502 [virtual-624] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
5284java.lang.InterruptedException: null
5285 ... 18 common frames omitted
5286Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
5287 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
5288 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
5289 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
5290 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
5291 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
5292 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
5293 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
5294 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
5295 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
5296 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
5297 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5298 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5299 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
5300 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
5301 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
5302 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5303 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5304 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
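----
The four ERROR entries above are teardown noise rather than test failures. KafkaFlow runs the polling consumer on a virtual thread inside an ox supervised scope (visible in the ThreadHerd and fork frames); when the scope ends, ox interrupts those threads, and KafkaConsumer.poll turns the interrupt into InterruptException, which the wrapper logs on its way out. The traces are printed root cause first, hence the "Wrapped by:" ordering. A rough sketch of the subscription shape the traces point at; ConsumerSettings, autoOffsetReset and KafkaFlow.subscribe follow ox's documented kafka module, but treat the exact signatures as assumptions here, not the test's code:

import ox.supervised
import ox.kafka.{ConsumerSettings, KafkaFlow}
import ox.kafka.ConsumerSettings.AutoOffsetReset

supervised {
  val settings = ConsumerSettings
    .default("g3_1")                           // group.id, as in the dumps above
    .bootstrapServers("localhost:6001")
    .autoOffsetReset(AutoOffsetReset.Earliest)
  // subscribe polls on a background fork; when the scope ends, that fork is
  // interrupted and the pending poll surfaces InterruptException, as logged above
  KafkaFlow
    .subscribe(settings, "t3_1")
    .take(10)
    .runForeach(msg => println(msg.value))
}
----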
530515:21:19.502 [virtual-634] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Revoke previously assigned partitions [t3_2-0]
530615:21:19.503 [virtual-634] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Member consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
530715:21:19.503 [virtual-634] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
530815:21:19.503 [virtual-634] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
530915:21:19.503 [virtual-635] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Revoke previously assigned partitions [t3_1-0]
531015:21:19.503 [virtual-635] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Member consumer-g3_1-4-dded5dd0-413a-4430-86c7-8c6065b9377f sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
531115:21:19.503 [virtual-635] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
531215:21:19.503 [virtual-635] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
531315:21:19.504 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
531415:21:19.504 [virtual-636] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-9] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
531515:21:19.504 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g3_1-3-34521706-738c-421f-b856-3a78edb772f3) members.).
531615:21:19.504 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-4-dded5dd0-413a-4430-86c7-8c6065b9377f has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
531715:21:19.507 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_1 with generation 2 is now empty.
531815:21:19.508 [virtual-636] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
531915:21:19.508 [virtual-636] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
532015:21:19.508 [virtual-636] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
532115:21:19.508 [virtual-636] INFO o.a.k.c.m.Metrics - Metrics reporters closed
532215:21:19.508 [virtual-636] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-9 unregistered
532315:21:19.508 [virtual-634] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
532415:21:19.509 [virtual-634] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
532515:21:19.509 [virtual-634] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
532615:21:19.509 [virtual-634] INFO o.a.k.c.m.Metrics - Metrics reporters closed
532715:21:19.511 [virtual-634] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-3 unregistered
532815:21:19.996 [virtual-635] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
532915:21:19.996 [virtual-635] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
533015:21:19.996 [virtual-635] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
533115:21:19.996 [virtual-635] INFO o.a.k.c.m.Metrics - Metrics reporters closed
533215:21:19.998 [virtual-635] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-4 unregistered
533315:21:19.999 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
5334 acks = -1
5335 batch.size = 16384
5336 bootstrap.servers = [localhost:6001]
5337 buffer.memory = 33554432
5338 client.dns.lookup = use_all_dns_ips
5339 client.id = producer-10
5340 compression.gzip.level = -1
5341 compression.lz4.level = 9
5342 compression.type = none
5343 compression.zstd.level = 3
5344 connections.max.idle.ms = 540000
5345 delivery.timeout.ms = 120000
5346 enable.idempotence = true
5347 enable.metrics.push = true
5348 interceptor.classes = []
5349 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
5350 linger.ms = 5
5351 max.block.ms = 10000
5352 max.in.flight.requests.per.connection = 5
5353 max.request.size = 1048576
5354 metadata.max.age.ms = 300000
5355 metadata.max.idle.ms = 300000
5356 metadata.recovery.rebootstrap.trigger.ms = 300000
5357 metadata.recovery.strategy = rebootstrap
5358 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
5359 metrics.num.samples = 2
5360 metrics.recording.level = INFO
5361 metrics.sample.window.ms = 30000
5362 partitioner.adaptive.partitioning.enable = true
5363 partitioner.availability.timeout.ms = 0
5364 partitioner.class = null
5365 partitioner.ignore.keys = false
5366 receive.buffer.bytes = 32768
5367 reconnect.backoff.max.ms = 1000
5368 reconnect.backoff.ms = 50
5369 request.timeout.ms = 30000
5370 retries = 2147483647
5371 retry.backoff.max.ms = 1000
5372 retry.backoff.ms = 1000
5373 sasl.client.callback.handler.class = null
5374 sasl.jaas.config = null
5375 sasl.kerberos.kinit.cmd = /usr/bin/kinit
5376 sasl.kerberos.min.time.before.relogin = 60000
5377 sasl.kerberos.service.name = null
5378 sasl.kerberos.ticket.renew.jitter = 0.05
5379 sasl.kerberos.ticket.renew.window.factor = 0.8
5380 sasl.login.callback.handler.class = null
5381 sasl.login.class = null
5382 sasl.login.connect.timeout.ms = null
5383 sasl.login.read.timeout.ms = null
5384 sasl.login.refresh.buffer.seconds = 300
5385 sasl.login.refresh.min.period.seconds = 60
5386 sasl.login.refresh.window.factor = 0.8
5387 sasl.login.refresh.window.jitter = 0.05
5388 sasl.login.retry.backoff.max.ms = 10000
5389 sasl.login.retry.backoff.ms = 100
5390 sasl.mechanism = GSSAPI
5391 sasl.oauthbearer.assertion.algorithm = RS256
5392 sasl.oauthbearer.assertion.claim.aud = null
5393 sasl.oauthbearer.assertion.claim.exp.seconds = 300
5394 sasl.oauthbearer.assertion.claim.iss = null
5395 sasl.oauthbearer.assertion.claim.jti.include = false
5396 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5397 sasl.oauthbearer.assertion.claim.sub = null
5398 sasl.oauthbearer.assertion.file = null
5399 sasl.oauthbearer.assertion.private.key.file = null
5400 sasl.oauthbearer.assertion.private.key.passphrase = null
5401 sasl.oauthbearer.assertion.template.file = null
5402 sasl.oauthbearer.client.credentials.client.id = null
5403 sasl.oauthbearer.client.credentials.client.secret = null
5404 sasl.oauthbearer.clock.skew.seconds = 30
5405 sasl.oauthbearer.expected.audience = null
5406 sasl.oauthbearer.expected.issuer = null
5407 sasl.oauthbearer.header.urlencode = false
5408 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5409 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5410 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5411 sasl.oauthbearer.jwks.endpoint.url = null
5412 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5413 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5414 sasl.oauthbearer.scope = null
5415 sasl.oauthbearer.scope.claim.name = scope
5416 sasl.oauthbearer.sub.claim.name = sub
5417 sasl.oauthbearer.token.endpoint.url = null
5418 security.protocol = PLAINTEXT
5419 security.providers = null
5420 send.buffer.bytes = 131072
5421 socket.connection.setup.timeout.max.ms = 30000
5422 socket.connection.setup.timeout.ms = 10000
5423 ssl.cipher.suites = null
5424 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5425 ssl.endpoint.identification.algorithm = https
5426 ssl.engine.factory.class = null
5427 ssl.key.password = null
5428 ssl.keymanager.algorithm = SunX509
5429 ssl.keystore.certificate.chain = null
5430 ssl.keystore.key = null
5431 ssl.keystore.location = null
5432 ssl.keystore.password = null
5433 ssl.keystore.type = JKS
5434 ssl.protocol = TLSv1.3
5435 ssl.provider = null
5436 ssl.secure.random.implementation = null
5437 ssl.trustmanager.algorithm = PKIX
5438 ssl.truststore.certificates = null
5439 ssl.truststore.location = null
5440 ssl.truststore.password = null
5441 ssl.truststore.type = JKS
5442 transaction.timeout.ms = 60000
5443 transaction.two.phase.commit.enable = false
5444 transactional.id = null
5445 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
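Note: each "ProducerConfig values:" / "ConsumerConfig values:" block in this log is printed by the Kafka client's AbstractConfig logger when a producer or consumer is instantiated; the tests set only a few keys (bootstrap.servers, the String serializers, some timeouts) and every other value shown is a client default. A minimal, hypothetical sketch of such a construction (illustrative only, not the ox-kafka test code):

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
import org.apache.kafka.common.serialization.StringSerializer

@main def producerConfigDemo(): Unit = {
  // Only three keys are set here; instantiating the producer makes the
  // client log the full "ProducerConfig values:" block, defaults included.
  val props = new Properties()
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
  val producer = new KafkaProducer[String, String](props)
  producer.close()
}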

15:21:19.999 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:20.000 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-10] Instantiated an idempotent producer.
15:21:20.002 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:20.002 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:20.002 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314080002
15:21:20.005 [kafka-producer-network-thread | producer-10] INFO o.a.k.c.Metadata - [Producer clientId=producer-10] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:20.005 [kafka-producer-network-thread | producer-10] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-10] ProducerId set to 9 with epoch 0
15:21:20.014 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-10] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:21:20.016 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:20.016 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:20.016 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:20.016 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:20.017 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-10 unregistered
15:21:20.018 [virtual-638] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
5461 allow.auto.create.topics = true
5462 auto.commit.interval.ms = 5000
5463 auto.offset.reset = earliest
5464 bootstrap.servers = [localhost:6001]
5465 check.crcs = true
5466 client.dns.lookup = use_all_dns_ips
5467 client.id = consumer-g3_1-5
5468 client.rack =
5469 connections.max.idle.ms = 540000
5470 default.api.timeout.ms = 60000
5471 enable.auto.commit = false
5472 enable.metrics.push = true
5473 exclude.internal.topics = true
5474 fetch.max.bytes = 52428800
5475 fetch.max.wait.ms = 500
5476 fetch.min.bytes = 1
5477 group.id = g3_1
5478 group.instance.id = null
5479 group.protocol = classic
5480 group.remote.assignor = null
5481 heartbeat.interval.ms = 3000
5482 interceptor.classes = []
5483 internal.leave.group.on.close = true
5484 internal.throw.on.fetch.stable.offset.unsupported = false
5485 isolation.level = read_uncommitted
5486 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
5487 max.partition.fetch.bytes = 1048576
5488 max.poll.interval.ms = 300000
5489 max.poll.records = 500
5490 metadata.max.age.ms = 300000
5491 metadata.recovery.rebootstrap.trigger.ms = 300000
5492 metadata.recovery.strategy = rebootstrap
5493 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
5494 metrics.num.samples = 2
5495 metrics.recording.level = INFO
5496 metrics.sample.window.ms = 30000
5497 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
5498 receive.buffer.bytes = 65536
5499 reconnect.backoff.max.ms = 1000
5500 reconnect.backoff.ms = 50
5501 request.timeout.ms = 30000
5502 retry.backoff.max.ms = 1000
5503 retry.backoff.ms = 100
5504 sasl.client.callback.handler.class = null
5505 sasl.jaas.config = null
5506 sasl.kerberos.kinit.cmd = /usr/bin/kinit
5507 sasl.kerberos.min.time.before.relogin = 60000
5508 sasl.kerberos.service.name = null
5509 sasl.kerberos.ticket.renew.jitter = 0.05
5510 sasl.kerberos.ticket.renew.window.factor = 0.8
5511 sasl.login.callback.handler.class = null
5512 sasl.login.class = null
5513 sasl.login.connect.timeout.ms = null
5514 sasl.login.read.timeout.ms = null
5515 sasl.login.refresh.buffer.seconds = 300
5516 sasl.login.refresh.min.period.seconds = 60
5517 sasl.login.refresh.window.factor = 0.8
5518 sasl.login.refresh.window.jitter = 0.05
5519 sasl.login.retry.backoff.max.ms = 10000
5520 sasl.login.retry.backoff.ms = 100
5521 sasl.mechanism = GSSAPI
5522 sasl.oauthbearer.assertion.algorithm = RS256
5523 sasl.oauthbearer.assertion.claim.aud = null
5524 sasl.oauthbearer.assertion.claim.exp.seconds = 300
5525 sasl.oauthbearer.assertion.claim.iss = null
5526 sasl.oauthbearer.assertion.claim.jti.include = false
5527 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5528 sasl.oauthbearer.assertion.claim.sub = null
5529 sasl.oauthbearer.assertion.file = null
5530 sasl.oauthbearer.assertion.private.key.file = null
5531 sasl.oauthbearer.assertion.private.key.passphrase = null
5532 sasl.oauthbearer.assertion.template.file = null
5533 sasl.oauthbearer.client.credentials.client.id = null
5534 sasl.oauthbearer.client.credentials.client.secret = null
5535 sasl.oauthbearer.clock.skew.seconds = 30
5536 sasl.oauthbearer.expected.audience = null
5537 sasl.oauthbearer.expected.issuer = null
5538 sasl.oauthbearer.header.urlencode = false
5539 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5540 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5541 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5542 sasl.oauthbearer.jwks.endpoint.url = null
5543 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5544 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5545 sasl.oauthbearer.scope = null
5546 sasl.oauthbearer.scope.claim.name = scope
5547 sasl.oauthbearer.sub.claim.name = sub
5548 sasl.oauthbearer.token.endpoint.url = null
5549 security.protocol = PLAINTEXT
5550 security.providers = null
5551 send.buffer.bytes = 131072
5552 session.timeout.ms = 45000
5553 share.acknowledgement.mode = implicit
5554 socket.connection.setup.timeout.max.ms = 30000
5555 socket.connection.setup.timeout.ms = 10000
5556 ssl.cipher.suites = null
5557 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5558 ssl.endpoint.identification.algorithm = https
5559 ssl.engine.factory.class = null
5560 ssl.key.password = null
5561 ssl.keymanager.algorithm = SunX509
5562 ssl.keystore.certificate.chain = null
5563 ssl.keystore.key = null
5564 ssl.keystore.location = null
5565 ssl.keystore.password = null
5566 ssl.keystore.type = JKS
5567 ssl.protocol = TLSv1.3
5568 ssl.provider = null
5569 ssl.secure.random.implementation = null
5570 ssl.trustmanager.algorithm = PKIX
5571 ssl.truststore.certificates = null
5572 ssl.truststore.location = null
5573 ssl.truststore.password = null
5574 ssl.truststore.type = JKS
5575 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:21:20.019 [virtual-638] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:20.022 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:20.022 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:20.022 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314080022
15:21:20.023 [virtual-641] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Subscribed to topic(s): t3_1
15:21:20.025 [virtual-641] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:20.026 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:21:20.027 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] (Re-)joining group
15:21:20.029 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in Empty state. Created a new member id consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9 and requesting the member to rejoin with this id.
15:21:20.029 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9
15:21:20.029 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] (Re-)joining group
15:21:20.030 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9 joins group g3_1 in Empty state. Adding to the group now.
15:21:20.030 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9 with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9).
15:21:23.030 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_1 generation 3 with 1 members.
15:21:23.031 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9', protocol='range'}
15:21:23.031 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Finished assignment for group at generation 3: {consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9=Assignment(partitions=[t3_1-0])}
15:21:23.032 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9 for group g3_1 for generation 3. The group has 1 members, 0 of which are static.
15:21:23.038 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9', protocol='range'}
15:21:23.038 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_1-0])
15:21:23.038 [virtual-641] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Adding newly assigned partitions: [t3_1-0]
15:21:23.040 [virtual-641] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t3_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
15:21:23.044 [virtual-638] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
5599 allow.auto.create.topics = true
5600 auto.commit.interval.ms = 5000
5601 auto.offset.reset = earliest
5602 bootstrap.servers = [localhost:6001]
5603 check.crcs = true
5604 client.dns.lookup = use_all_dns_ips
5605 client.id = consumer-g3_2-6
5606 client.rack =
5607 connections.max.idle.ms = 540000
5608 default.api.timeout.ms = 60000
5609 enable.auto.commit = false
5610 enable.metrics.push = true
5611 exclude.internal.topics = true
5612 fetch.max.bytes = 52428800
5613 fetch.max.wait.ms = 500
5614 fetch.min.bytes = 1
5615 group.id = g3_2
5616 group.instance.id = null
5617 group.protocol = classic
5618 group.remote.assignor = null
5619 heartbeat.interval.ms = 3000
5620 interceptor.classes = []
5621 internal.leave.group.on.close = true
5622 internal.throw.on.fetch.stable.offset.unsupported = false
5623 isolation.level = read_uncommitted
5624 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
5625 max.partition.fetch.bytes = 1048576
5626 max.poll.interval.ms = 300000
5627 max.poll.records = 500
5628 metadata.max.age.ms = 300000
5629 metadata.recovery.rebootstrap.trigger.ms = 300000
5630 metadata.recovery.strategy = rebootstrap
5631 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
5632 metrics.num.samples = 2
5633 metrics.recording.level = INFO
5634 metrics.sample.window.ms = 30000
5635 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
5636 receive.buffer.bytes = 65536
5637 reconnect.backoff.max.ms = 1000
5638 reconnect.backoff.ms = 50
5639 request.timeout.ms = 30000
5640 retry.backoff.max.ms = 1000
5641 retry.backoff.ms = 100
5642 sasl.client.callback.handler.class = null
5643 sasl.jaas.config = null
5644 sasl.kerberos.kinit.cmd = /usr/bin/kinit
5645 sasl.kerberos.min.time.before.relogin = 60000
5646 sasl.kerberos.service.name = null
5647 sasl.kerberos.ticket.renew.jitter = 0.05
5648 sasl.kerberos.ticket.renew.window.factor = 0.8
5649 sasl.login.callback.handler.class = null
5650 sasl.login.class = null
5651 sasl.login.connect.timeout.ms = null
5652 sasl.login.read.timeout.ms = null
5653 sasl.login.refresh.buffer.seconds = 300
5654 sasl.login.refresh.min.period.seconds = 60
5655 sasl.login.refresh.window.factor = 0.8
5656 sasl.login.refresh.window.jitter = 0.05
5657 sasl.login.retry.backoff.max.ms = 10000
5658 sasl.login.retry.backoff.ms = 100
5659 sasl.mechanism = GSSAPI
5660 sasl.oauthbearer.assertion.algorithm = RS256
5661 sasl.oauthbearer.assertion.claim.aud = null
5662 sasl.oauthbearer.assertion.claim.exp.seconds = 300
5663 sasl.oauthbearer.assertion.claim.iss = null
5664 sasl.oauthbearer.assertion.claim.jti.include = false
5665 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5666 sasl.oauthbearer.assertion.claim.sub = null
5667 sasl.oauthbearer.assertion.file = null
5668 sasl.oauthbearer.assertion.private.key.file = null
5669 sasl.oauthbearer.assertion.private.key.passphrase = null
5670 sasl.oauthbearer.assertion.template.file = null
5671 sasl.oauthbearer.client.credentials.client.id = null
5672 sasl.oauthbearer.client.credentials.client.secret = null
5673 sasl.oauthbearer.clock.skew.seconds = 30
5674 sasl.oauthbearer.expected.audience = null
5675 sasl.oauthbearer.expected.issuer = null
5676 sasl.oauthbearer.header.urlencode = false
5677 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5678 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5679 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5680 sasl.oauthbearer.jwks.endpoint.url = null
5681 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5682 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5683 sasl.oauthbearer.scope = null
5684 sasl.oauthbearer.scope.claim.name = scope
5685 sasl.oauthbearer.sub.claim.name = sub
5686 sasl.oauthbearer.token.endpoint.url = null
5687 security.protocol = PLAINTEXT
5688 security.providers = null
5689 send.buffer.bytes = 131072
5690 session.timeout.ms = 45000
5691 share.acknowledgement.mode = implicit
5692 socket.connection.setup.timeout.max.ms = 30000
5693 socket.connection.setup.timeout.ms = 10000
5694 ssl.cipher.suites = null
5695 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5696 ssl.endpoint.identification.algorithm = https
5697 ssl.engine.factory.class = null
5698 ssl.key.password = null
5699 ssl.keymanager.algorithm = SunX509
5700 ssl.keystore.certificate.chain = null
5701 ssl.keystore.key = null
5702 ssl.keystore.location = null
5703 ssl.keystore.password = null
5704 ssl.keystore.type = JKS
5705 ssl.protocol = TLSv1.3
5706 ssl.provider = null
5707 ssl.secure.random.implementation = null
5708 ssl.trustmanager.algorithm = PKIX
5709 ssl.truststore.certificates = null
5710 ssl.truststore.location = null
5711 ssl.truststore.password = null
5712 ssl.truststore.type = JKS
5713 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:21:23.044 [virtual-638] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:23.047 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:23.047 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:23.047 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314083047
15:21:23.048 [virtual-645] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Subscribed to topic(s): t3_1
15:21:23.051 [virtual-645] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:23.052 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:21:23.052 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] (Re-)joining group
15:21:23.055 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_2 in Empty state. Created a new member id consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f and requesting the member to rejoin with this id.
15:21:23.055 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Request joining group due to: need to re-join with the given member-id: consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f
15:21:23.055 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] (Re-)joining group
15:21:23.056 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f joins group g3_2 in Empty state. Adding to the group now.
15:21:23.056 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f).
15:21:26.056 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_2 generation 1 with 1 members.
15:21:26.057 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f', protocol='range'}
15:21:26.057 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Finished assignment for group at generation 1: {consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f=Assignment(partitions=[t3_1-0])}
15:21:26.058 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f for group g3_2 for generation 1. The group has 1 members, 0 of which are static.
15:21:26.070 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f', protocol='range'}
15:21:26.070 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Notifying assignor about the new Assignment(partitions=[t3_1-0])
15:21:26.070 [virtual-645] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Adding newly assigned partitions: [t3_1-0]
15:21:26.071 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Found no committed offset for partition t3_1-0
15:21:26.074 [virtual-645] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Resetting offset for partition t3_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
15:21:26.077 [virtual-644] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:26.077 [virtual-640] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:26.077 [virtual-645] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:26.077 [virtual-641] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
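Note: the four ERROR entries above share one mechanism, which the traces make explicit: each polling fork blocks in CompletableFuture.get (via ox.channels.ActorRef.ask), the enclosing scope interrupts its forks as the test winds down (the consumers send LeaveGroup requests immediately below), and the interrupt surfaces as java.lang.InterruptedException, which the Kafka network client re-wraps as InterruptException. A minimal JDK-only sketch of that interaction (names and the delay are illustrative, not taken from the ox sources):

import java.util.concurrent.CompletableFuture

@main def interruptedGetDemo(): Unit = {
  val reply = new CompletableFuture[String]() // a reply that never arrives
  val poller = Thread.ofVirtual().start(() => {
    try reply.get() // blocks, as ActorRef.ask does while awaiting the actor's reply
    catch { case e: InterruptedException => println(s"poller interrupted: $e") }
  })
  Thread.sleep(100)  // let the poller block on get()
  poller.interrupt() // analogue of the scope cancelling its forks at shutdown
  poller.join()      // prints: poller interrupted: java.lang.InterruptedException
}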
15:21:26.078 [virtual-647] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Revoke previously assigned partitions [t3_1-0]
15:21:26.079 [virtual-647] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Member consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:21:26.079 [virtual-647] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
15:21:26.079 [virtual-647] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
15:21:26.079 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:21:26.079 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g3_1-5-36c61b5a-9ee7-4c47-8508-26b4b0fcd0b9) members.).
15:21:26.079 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_1 with generation 4 is now empty.
15:21:26.079 [virtual-648] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Revoke previously assigned partitions [t3_1-0]
15:21:26.079 [virtual-648] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Member consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:21:26.080 [virtual-648] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Resetting generation and member id due to: consumer pro-actively leaving the group
15:21:26.080 [virtual-648] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Request joining group due to: consumer pro-actively leaving the group
15:21:26.080 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_2] Member consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:21:26.080 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g3_2-6-e128ec1e-2431-449b-b1c3-aceabcfc031f) members.).
15:21:26.080 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_2 with generation 2 is now empty.
15:21:26.555 [virtual-647] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:26.555 [virtual-647] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:26.555 [virtual-647] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:26.555 [virtual-647] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:26.557 [virtual-647] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-5 unregistered
15:21:26.578 [virtual-648] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:26.578 [virtual-648] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:26.578 [virtual-648] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:26.578 [virtual-648] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:26.580 [virtual-648] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_2-6 unregistered
15:21:26.583 [virtual-649] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
5838 acks = -1
5839 batch.size = 16384
5840 bootstrap.servers = [localhost:6001]
5841 buffer.memory = 33554432
5842 client.dns.lookup = use_all_dns_ips
5843 client.id = producer-11
5844 compression.gzip.level = -1
5845 compression.lz4.level = 9
5846 compression.type = none
5847 compression.zstd.level = 3
5848 connections.max.idle.ms = 540000
5849 delivery.timeout.ms = 120000
5850 enable.idempotence = true
5851 enable.metrics.push = true
5852 interceptor.classes = []
5853 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
5854 linger.ms = 5
5855 max.block.ms = 60000
5856 max.in.flight.requests.per.connection = 5
5857 max.request.size = 1048576
5858 metadata.max.age.ms = 300000
5859 metadata.max.idle.ms = 300000
5860 metadata.recovery.rebootstrap.trigger.ms = 300000
5861 metadata.recovery.strategy = rebootstrap
5862 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
5863 metrics.num.samples = 2
5864 metrics.recording.level = INFO
5865 metrics.sample.window.ms = 30000
5866 partitioner.adaptive.partitioning.enable = true
5867 partitioner.availability.timeout.ms = 0
5868 partitioner.class = null
5869 partitioner.ignore.keys = false
5870 receive.buffer.bytes = 32768
5871 reconnect.backoff.max.ms = 1000
5872 reconnect.backoff.ms = 50
5873 request.timeout.ms = 30000
5874 retries = 2147483647
5875 retry.backoff.max.ms = 1000
5876 retry.backoff.ms = 100
5877 sasl.client.callback.handler.class = null
5878 sasl.jaas.config = null
5879 sasl.kerberos.kinit.cmd = /usr/bin/kinit
5880 sasl.kerberos.min.time.before.relogin = 60000
5881 sasl.kerberos.service.name = null
5882 sasl.kerberos.ticket.renew.jitter = 0.05
5883 sasl.kerberos.ticket.renew.window.factor = 0.8
5884 sasl.login.callback.handler.class = null
5885 sasl.login.class = null
5886 sasl.login.connect.timeout.ms = null
5887 sasl.login.read.timeout.ms = null
5888 sasl.login.refresh.buffer.seconds = 300
5889 sasl.login.refresh.min.period.seconds = 60
5890 sasl.login.refresh.window.factor = 0.8
5891 sasl.login.refresh.window.jitter = 0.05
5892 sasl.login.retry.backoff.max.ms = 10000
5893 sasl.login.retry.backoff.ms = 100
5894 sasl.mechanism = GSSAPI
5895 sasl.oauthbearer.assertion.algorithm = RS256
5896 sasl.oauthbearer.assertion.claim.aud = null
5897 sasl.oauthbearer.assertion.claim.exp.seconds = 300
5898 sasl.oauthbearer.assertion.claim.iss = null
5899 sasl.oauthbearer.assertion.claim.jti.include = false
5900 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5901 sasl.oauthbearer.assertion.claim.sub = null
5902 sasl.oauthbearer.assertion.file = null
5903 sasl.oauthbearer.assertion.private.key.file = null
5904 sasl.oauthbearer.assertion.private.key.passphrase = null
5905 sasl.oauthbearer.assertion.template.file = null
5906 sasl.oauthbearer.client.credentials.client.id = null
5907 sasl.oauthbearer.client.credentials.client.secret = null
5908 sasl.oauthbearer.clock.skew.seconds = 30
5909 sasl.oauthbearer.expected.audience = null
5910 sasl.oauthbearer.expected.issuer = null
5911 sasl.oauthbearer.header.urlencode = false
5912 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5913 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5914 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5915 sasl.oauthbearer.jwks.endpoint.url = null
5916 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5917 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5918 sasl.oauthbearer.scope = null
5919 sasl.oauthbearer.scope.claim.name = scope
5920 sasl.oauthbearer.sub.claim.name = sub
5921 sasl.oauthbearer.token.endpoint.url = null
5922 security.protocol = PLAINTEXT
5923 security.providers = null
5924 send.buffer.bytes = 131072
5925 socket.connection.setup.timeout.max.ms = 30000
5926 socket.connection.setup.timeout.ms = 10000
5927 ssl.cipher.suites = null
5928 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5929 ssl.endpoint.identification.algorithm = https
5930 ssl.engine.factory.class = null
5931 ssl.key.password = null
5932 ssl.keymanager.algorithm = SunX509
5933 ssl.keystore.certificate.chain = null
5934 ssl.keystore.key = null
5935 ssl.keystore.location = null
5936 ssl.keystore.password = null
5937 ssl.keystore.type = JKS
5938 ssl.protocol = TLSv1.3
5939 ssl.provider = null
5940 ssl.secure.random.implementation = null
5941 ssl.trustmanager.algorithm = PKIX
5942 ssl.truststore.certificates = null
5943 ssl.truststore.location = null
5944 ssl.truststore.password = null
5945 ssl.truststore.type = JKS
5946 transaction.timeout.ms = 60000
5947 transaction.two.phase.commit.enable = false
5948 transactional.id = null
5949 value.serializer = class org.apache.kafka.common.serialization.StringSerializer

15:21:26.584 [virtual-649] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:26.584 [virtual-649] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-11] Instantiated an idempotent producer.
15:21:26.586 [virtual-649] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:26.586 [virtual-649] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:26.586 [virtual-649] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314086586
15:21:26.589 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t4) to the active controller.
15:21:26.589 [kafka-producer-network-thread | producer-11] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-11] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t4=UNKNOWN_TOPIC_OR_PARTITION}
15:21:26.589 [kafka-producer-network-thread | producer-11] INFO o.a.k.c.Metadata - [Producer clientId=producer-11] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:26.590 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t4', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
15:21:26.590 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t4 with topic ID spUHNnTuTZ66E4H6ASTdOw.
15:21:26.590 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t4-0 with topic ID spUHNnTuTZ66E4H6ASTdOw and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
15:21:26.591 [kafka-producer-network-thread | producer-11] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-11] ProducerId set to 10 with epoch 0
15:21:26.617 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
15:21:26.617 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t4-0)
15:21:26.617 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t4-0 with topic id spUHNnTuTZ66E4H6ASTdOw.
15:21:26.619 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t4-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
15:21:26.620 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t4-0 in /tmp/kafka-logs5982689497894266552/t4-0 with properties {}
15:21:26.620 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t4-0 broker=0] No checkpointed highwatermark is found for partition t4-0
15:21:26.620 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t4-0 broker=0] Log loaded for partition t4-0 with initial high watermark 0
15:21:26.620 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t4-0 with topic id Some(spUHNnTuTZ66E4H6ASTdOw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
15:21:26.709 [virtual-653] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-11] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:21:26.713 [virtual-653] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:26.713 [virtual-653] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:26.713 [virtual-653] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:26.713 [virtual-653] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:26.713 [virtual-653] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-11 unregistered
15:21:26.714 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
5978 allow.auto.create.topics = true
5979 auto.commit.interval.ms = 5000
5980 auto.offset.reset = earliest
5981 bootstrap.servers = [localhost:6001]
5982 check.crcs = true
5983 client.dns.lookup = use_all_dns_ips
5984 client.id = consumer-embedded-kafka-spec-7
5985 client.rack =
5986 connections.max.idle.ms = 540000
5987 default.api.timeout.ms = 60000
5988 enable.auto.commit = false
5989 enable.metrics.push = true
5990 exclude.internal.topics = true
5991 fetch.max.bytes = 52428800
5992 fetch.max.wait.ms = 500
5993 fetch.min.bytes = 1
5994 group.id = embedded-kafka-spec
5995 group.instance.id = null
5996 group.protocol = classic
5997 group.remote.assignor = null
5998 heartbeat.interval.ms = 3000
5999 interceptor.classes = []
6000 internal.leave.group.on.close = true
6001 internal.throw.on.fetch.stable.offset.unsupported = false
6002 isolation.level = read_uncommitted
6003 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
6004 max.partition.fetch.bytes = 1048576
6005 max.poll.interval.ms = 300000
6006 max.poll.records = 500
6007 metadata.max.age.ms = 300000
6008 metadata.recovery.rebootstrap.trigger.ms = 300000
6009 metadata.recovery.strategy = rebootstrap
6010 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
6011 metrics.num.samples = 2
6012 metrics.recording.level = INFO
6013 metrics.sample.window.ms = 30000
6014 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
6015 receive.buffer.bytes = 65536
6016 reconnect.backoff.max.ms = 1000
6017 reconnect.backoff.ms = 50
6018 request.timeout.ms = 30000
6019 retry.backoff.max.ms = 1000
6020 retry.backoff.ms = 100
6021 sasl.client.callback.handler.class = null
6022 sasl.jaas.config = null
6023 sasl.kerberos.kinit.cmd = /usr/bin/kinit
6024 sasl.kerberos.min.time.before.relogin = 60000
6025 sasl.kerberos.service.name = null
6026 sasl.kerberos.ticket.renew.jitter = 0.05
6027 sasl.kerberos.ticket.renew.window.factor = 0.8
6028 sasl.login.callback.handler.class = null
6029 sasl.login.class = null
6030 sasl.login.connect.timeout.ms = null
6031 sasl.login.read.timeout.ms = null
6032 sasl.login.refresh.buffer.seconds = 300
6033 sasl.login.refresh.min.period.seconds = 60
6034 sasl.login.refresh.window.factor = 0.8
6035 sasl.login.refresh.window.jitter = 0.05
6036 sasl.login.retry.backoff.max.ms = 10000
6037 sasl.login.retry.backoff.ms = 100
6038 sasl.mechanism = GSSAPI
6039 sasl.oauthbearer.assertion.algorithm = RS256
6040 sasl.oauthbearer.assertion.claim.aud = null
6041 sasl.oauthbearer.assertion.claim.exp.seconds = 300
6042 sasl.oauthbearer.assertion.claim.iss = null
6043 sasl.oauthbearer.assertion.claim.jti.include = false
6044 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
6045 sasl.oauthbearer.assertion.claim.sub = null
6046 sasl.oauthbearer.assertion.file = null
6047 sasl.oauthbearer.assertion.private.key.file = null
6048 sasl.oauthbearer.assertion.private.key.passphrase = null
6049 sasl.oauthbearer.assertion.template.file = null
6050 sasl.oauthbearer.client.credentials.client.id = null
6051 sasl.oauthbearer.client.credentials.client.secret = null
6052 sasl.oauthbearer.clock.skew.seconds = 30
6053 sasl.oauthbearer.expected.audience = null
6054 sasl.oauthbearer.expected.issuer = null
6055 sasl.oauthbearer.header.urlencode = false
6056 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
6057 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
6058 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
6059 sasl.oauthbearer.jwks.endpoint.url = null
6060 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
6061 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
6062 sasl.oauthbearer.scope = null
6063 sasl.oauthbearer.scope.claim.name = scope
6064 sasl.oauthbearer.sub.claim.name = sub
6065 sasl.oauthbearer.token.endpoint.url = null
6066 security.protocol = PLAINTEXT
6067 security.providers = null
6068 send.buffer.bytes = 131072
6069 session.timeout.ms = 45000
6070 share.acknowledgement.mode = implicit
6071 socket.connection.setup.timeout.max.ms = 30000
6072 socket.connection.setup.timeout.ms = 10000
6073 ssl.cipher.suites = null
6074 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
6075 ssl.endpoint.identification.algorithm = https
6076 ssl.engine.factory.class = null
6077 ssl.key.password = null
6078 ssl.keymanager.algorithm = SunX509
6079 ssl.keystore.certificate.chain = null
6080 ssl.keystore.key = null
6081 ssl.keystore.location = null
6082 ssl.keystore.password = null
6083 ssl.keystore.type = JKS
6084 ssl.protocol = TLSv1.3
6085 ssl.provider = null
6086 ssl.secure.random.implementation = null
6087 ssl.trustmanager.algorithm = PKIX
6088 ssl.truststore.certificates = null
6089 ssl.truststore.location = null
6090 ssl.truststore.password = null
6091 ssl.truststore.type = JKS
6092 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:21:26.714 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:26.717 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:26.717 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:26.717 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314086717
15:21:26.717 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Subscribed to topic(s): t4
15:21:26.719 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:26.721 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:21:26.722 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] (Re-)joining group
15:21:26.724 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group embedded-kafka-spec in Empty state. Created a new member id consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed and requesting the member to rejoin with this id.
15:21:26.725 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Request joining group due to: need to re-join with the given member-id: consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed
15:21:26.725 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] (Re-)joining group
15:21:26.725 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed joins group embedded-kafka-spec in Empty state. Adding to the group now.
15:21:26.726 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed with group instance id null; client reason: need to re-join with the given member-id: consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed).
15:21:29.726 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group embedded-kafka-spec generation 3 with 1 members.
15:21:29.727 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Successfully joined group with generation Generation{generationId=3, memberId='consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed', protocol='range'}
15:21:29.727 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Finished assignment for group at generation 3: {consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed=Assignment(partitions=[t4-0])}
15:21:29.728 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed for group embedded-kafka-spec for generation 3. The group has 1 members, 0 of which are static.
15:21:29.735 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Successfully synced group in generation Generation{generationId=3, memberId='consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed', protocol='range'}
15:21:29.736 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Notifying assignor about the new Assignment(partitions=[t4-0])
15:21:29.736 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Adding newly assigned partitions: [t4-0]
15:21:29.737 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Found no committed offset for partition t4-0
15:21:29.738 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Resetting offset for partition t4-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
15:21:29.758 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Revoke previously assigned partitions [t4-0]
15:21:29.758 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Member consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:21:29.758 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Resetting generation and member id due to: consumer pro-actively leaving the group
15:21:29.758 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Request joining group due to: consumer pro-actively leaving the group
15:21:29.759 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group embedded-kafka-spec] Member consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:21:29.759 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-embedded-kafka-spec-7-44e06cb6-5d2a-4d96-89d6-5994258f46ed) members.).
15:21:29.759 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group embedded-kafka-spec with generation 4 is now empty.
15:21:30.241 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:30.241 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:30.241 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:30.241 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:30.243 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-embedded-kafka-spec-7 unregistered
15:21:30.245 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-12
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

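The dump above shows producer-12 configured against the embedded broker at localhost:6001 with String serializers, linger.ms = 5 and max.block.ms = 10000. For orientation, a minimal standalone sketch that would yield an equivalent config dump; this is illustrative only (the topic, key and value literals are made up here), not the helper code the KafkaTest suite actually runs:

    import java.util.Properties
    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
    import org.apache.kafka.common.serialization.StringSerializer

    // Sketch: reconstructs the settings visible in the dump above.
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    props.put(ProducerConfig.LINGER_MS_CONFIG, "5")        // logged: linger.ms = 5
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000") // logged: max.block.ms = 10000

    val producer = new KafkaProducer[String, String](props)
    try producer.send(new ProducerRecord("t5_1", "key", "value")).get() // blocks until acked
    finally producer.close()
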
15:21:30.245 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:30.245 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-12] Instantiated an idempotent producer.
15:21:30.247 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:30.248 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:30.248 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314090247
15:21:30.250 [data-plane-kafka-request-handler-3] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t5_1) to the active controller.
15:21:30.250 [kafka-producer-network-thread | producer-12] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-12] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t5_1=UNKNOWN_TOPIC_OR_PARTITION}
15:21:30.250 [kafka-producer-network-thread | producer-12] INFO o.a.k.c.Metadata - [Producer clientId=producer-12] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:30.251 [kafka-producer-network-thread | producer-12] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-12] ProducerId set to 11 with epoch 0
15:21:30.251 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t5_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
15:21:30.251 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t5_1 with topic ID LsOfElaqQT-rRlHjYyebOg.
15:21:30.251 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t5_1-0 with topic ID LsOfElaqQT-rRlHjYyebOg and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
15:21:30.277 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
15:21:30.277 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t5_1-0)
15:21:30.277 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t5_1-0 with topic id LsOfElaqQT-rRlHjYyebOg.
15:21:30.279 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t5_1-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
15:21:30.280 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t5_1-0 in /tmp/kafka-logs5982689497894266552/t5_1-0 with properties {}
15:21:30.280 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_1-0 broker=0] No checkpointed highwatermark is found for partition t5_1-0
15:21:30.280 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_1-0 broker=0] Log loaded for partition t5_1-0 with initial high watermark 0
15:21:30.280 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t5_1-0 with topic id Some(LsOfElaqQT-rRlHjYyebOg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
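The UNKNOWN_TOPIC_OR_PARTITION warning above is transient: the producer's first metadata request races with broker-side auto-creation of t5_1, and the request is retried once the topic exists. A test can avoid the warning entirely by creating topics up front with the admin client; a minimal sketch, with the topic name and sizing taken from the CreateTopics result in the log and everything else assumed:

    import java.util.Properties
    import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig, NewTopic}

    // Hypothetical pre-creation of the test topic, so the first produce does
    // not race with broker-side auto-creation (the source of the warning above).
    val props = new Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    val admin = AdminClient.create(props)
    try
      // numPartitions = 1, replicationFactor = 1, matching the log's CreateTopics result
      admin.createTopics(java.util.List.of(new NewTopic("t5_1", 1, 1.toShort))).all().get()
    finally admin.close()
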
15:21:31.261 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-12] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:21:31.263 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:31.263 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:31.263 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:31.263 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:31.263 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-12 unregistered
15:21:31.264 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (config dump identical to the producer-12 dump above, except client.id = producer-13)

15:21:31.264 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:31.264 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-13] Instantiated an idempotent producer.
15:21:31.266 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:31.266 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:31.266 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314091266
15:21:31.269 [kafka-producer-network-thread | producer-13] INFO o.a.k.c.Metadata - [Producer clientId=producer-13] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:31.269 [kafka-producer-network-thread | producer-13] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-13] ProducerId set to 12 with epoch 0
15:21:31.277 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-13] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:21:31.278 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:31.279 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:31.279 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:31.279 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:31.279 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-13 unregistered
15:21:31.279 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (config dump identical to the producer-12 dump above, except client.id = producer-14)

15:21:31.280 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:31.280 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-14] Instantiated an idempotent producer.
15:21:31.282 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:31.282 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:31.282 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314091282
15:21:31.284 [kafka-producer-network-thread | producer-14] INFO o.a.k.c.Metadata - [Producer clientId=producer-14] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:31.285 [kafka-producer-network-thread | producer-14] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-14] ProducerId set to 13 with epoch 0
15:21:31.293 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-14] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:21:31.295 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:31.295 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:31.295 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:31.295 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:31.295 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-14 unregistered
15:21:31.297 [virtual-658] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_1-8
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:21:31.297 [virtual-658] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:31.297 [virtual-660] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    (config dump identical to the consumer-g5_1-8 dump above, except client.id = consumer-g5_1-9)

15:21:31.297 [virtual-660] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:31.300 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:31.300 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:31.300 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314091300
15:21:31.302 [virtual-658] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:31.302 [virtual-658] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:31.302 [virtual-658] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314091302
15:21:31.302 [virtual-664] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Subscribed to topic(s): t5_2
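Two consumers in group g5_1 have now been configured: consumer-g5_1-8 has just subscribed to t5_2, and consumer-g5_1-9 subscribes to t5_1 shortly below. A standalone sketch of an equivalent consumer, matching the dumped settings (earliest offset reset, manual commits); illustrative only, since the test drives its consumers through ox-kafka rather than this code:

    import java.time.Duration
    import java.util.Properties
    import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
    import org.apache.kafka.common.serialization.StringDeserializer

    // Hypothetical reconstruction of the ConsumerConfig dumped above.
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "g5_1")
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // logged: auto.offset.reset = earliest
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")   // logged: enable.auto.commit = false
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)

    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(java.util.List.of("t5_2"))
    val records = consumer.poll(Duration.ofMillis(500)) // the first poll also triggers the group join seen below
    consumer.close()
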
15:21:31.303 [virtual-660] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (config dump identical to the producer-12 dump above, except client.id = producer-15, max.block.ms = 60000 and retry.backoff.ms = 100)

15:21:31.304 [virtual-660] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:31.304 [virtual-660] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-15] Instantiated an idempotent producer.
15:21:31.306 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:31.307 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:31.308 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314091306
15:21:31.309 [virtual-661] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Subscribed to topic(s): t5_1
15:21:31.309 [data-plane-kafka-request-handler-5] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t5_2) to the active controller.
15:21:31.310 [virtual-664] WARN o.a.k.c.NetworkClient - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] The metadata response from the cluster reported a recoverable issue with correlation id 2 : {t5_2=UNKNOWN_TOPIC_OR_PARTITION}
15:21:31.311 [virtual-664] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:31.311 [kafka-producer-network-thread | producer-15] INFO o.a.k.c.Metadata - [Producer clientId=producer-15] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:31.312 [kafka-producer-network-thread | producer-15] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-15] ProducerId set to 14 with epoch 0
15:21:31.313 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:21:31.313 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t5_2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
15:21:31.314 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t5_2 with topic ID luJObqVwRveObARjBBPdAQ.
15:21:31.314 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t5_2-0 with topic ID luJObqVwRveObARjBBPdAQ and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
15:21:31.314 [virtual-661] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:31.314 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] (Re-)joining group
15:21:31.315 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:21:31.316 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in Empty state. Created a new member id consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808 and requesting the member to rejoin with this id.
15:21:31.317 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808
15:21:31.317 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] (Re-)joining group
15:21:31.317 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] (Re-)joining group
15:21:31.318 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808 joins group g5_1 in Empty state. Adding to the group now.
15:21:31.318 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808 with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808).
15:21:31.319 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in PreparingRebalance state. Created a new member id consumer-g5_1-9-e09db878-c011-4ede-b0df-6d673381ec86 and requesting the member to rejoin with this id.
15:21:31.319 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-9-e09db878-c011-4ede-b0df-6d673381ec86
15:21:31.319 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] (Re-)joining group
15:21:31.320 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-9-e09db878-c011-4ede-b0df-6d673381ec86 joins group g5_1 in PreparingRebalance state. Adding to the group now.
15:21:31.339 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
15:21:31.339 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t5_2-0)
15:21:31.339 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t5_2-0 with topic id luJObqVwRveObARjBBPdAQ.
15:21:31.342 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t5_2-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
15:21:31.343 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t5_2-0 in /tmp/kafka-logs5982689497894266552/t5_2-0 with properties {}
15:21:31.343 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_2-0 broker=0] No checkpointed highwatermark is found for partition t5_2-0
15:21:31.343 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_2-0 broker=0] Log loaded for partition t5_2-0 with initial high watermark 0
15:21:31.343 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t5_2-0 with topic id Some(luJObqVwRveObARjBBPdAQ) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
15:21:37.320 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_1 generation 1 with 2 members.
15:21:37.320 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808', protocol='range'}
15:21:37.320 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_1-9-e09db878-c011-4ede-b0df-6d673381ec86', protocol='range'}
15:21:37.322 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Finished assignment for group at generation 1: {consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808=Assignment(partitions=[t5_2-0]), consumer-g5_1-9-e09db878-c011-4ede-b0df-6d673381ec86=Assignment(partitions=[t5_1-0])}
15:21:37.322 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808 for group g5_1 for generation 1. The group has 2 members, 0 of which are static.
15:21:37.328 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808', protocol='range'}
15:21:37.328 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_1-9-e09db878-c011-4ede-b0df-6d673381ec86', protocol='range'}
15:21:37.329 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_1-0])
15:21:37.329 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_2-0])
15:21:37.329 [virtual-661] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Adding newly assigned partitions: [t5_1-0]
15:21:37.329 [virtual-664] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Adding newly assigned partitions: [t5_2-0]
15:21:37.330 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Found no committed offset for partition t5_1-0
15:21:37.330 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Found no committed offset for partition t5_2-0
15:21:37.331 [virtual-661] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Resetting offset for partition t5_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
15:21:37.331 [virtual-664] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Resetting offset for partition t5_2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
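The sequence above is the classic-protocol group handshake: each consumer first joins with an unknown member id, is told to re-join with a coordinator-assigned id, the group stabilizes at generation 1, the leader (consumer-g5_1-8) computes the range assignment, and each member then resets to offset 0 because no committed offset exists. Code that needs to observe the "Adding newly assigned partitions" / "Revoke previously assigned partitions" transitions can hook them with a rebalance listener; a minimal sketch using plain kafka-clients, with all names assumed:

    import java.util
    import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
    import org.apache.kafka.common.TopicPartition

    // Hypothetical listener surfacing the same transitions logged above.
    val listener = new ConsumerRebalanceListener:
      override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit =
        println(s"Adding newly assigned partitions: $partitions")
      override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit =
        println(s"Revoke previously assigned partitions: $partitions")

    // Passed at subscription time, e.g.: consumer.subscribe(java.util.List.of("t5_1"), listener)
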
15:21:39.346 [virtual-663] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:39.346 [virtual-661] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:39.346 [virtual-667] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
    at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
    at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
    at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
    at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:39.346 [virtual-664] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
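These InterruptedException / InterruptException traces appear to be teardown noise rather than genuine failures: the producer and consumer close messages that immediately follow suggest the test's supervised scope is shutting down, interrupting the virtual threads that run the KafkaFlow polling actor, and KafkaConsumer.poll surfaces the interrupt as InterruptException. A minimal sketch of the pattern, assuming ox's supervised/fork API as it appears in the stack traces above:

    import ox.{fork, supervised}

    // Sketch only: a daemon fork blocked in a poll loop. When the supervised
    // scope's body finishes, the fork's virtual thread is interrupted; a
    // blocking consumer.poll(...) inside it would then throw
    // org.apache.kafka.common.errors.InterruptException (wrapping
    // InterruptedException), matching the traces above.
    supervised {
      fork {
        while true do
          // consumer.poll(java.time.Duration.ofMillis(100)) // interrupted on scope end
          Thread.sleep(100) // stand-in for the blocking poll
      }
      // body ends -> scope tears down -> the fork above is interrupted
    }
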
15:21:39.346 [virtual-673] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-15] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:21:39.347 [virtual-675] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Revoke previously assigned partitions [t5_2-0]
15:21:39.347 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Member consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:21:39.347 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
15:21:39.347 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
15:21:39.349 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:21:39.349 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g5_1-8-8ebff4bc-21c3-4e0f-9468-e76e80e9e808) members.).
15:21:39.349 [virtual-674] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Revoke previously assigned partitions [t5_1-0]
15:21:39.349 [virtual-674] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Member consumer-g5_1-9-e09db878-c011-4ede-b0df-6d673381ec86 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:21:39.349 [virtual-674] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
15:21:39.349 [virtual-674] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
15:21:39.350 [virtual-673] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:39.350 [virtual-673] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:39.350 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-9-e09db878-c011-4ede-b0df-6d673381ec86 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:21:39.350 [virtual-673] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:39.350 [virtual-673] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:39.350 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_1 with generation 2 is now empty.
15:21:39.350 [virtual-673] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-15 unregistered
15:21:39.351 [virtual-675] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:39.351 [virtual-675] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:39.351 [virtual-675] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:39.351 [virtual-675] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:39.353 [virtual-675] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-8 unregistered
15:21:39.842 [virtual-674] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:39.843 [virtual-674] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:39.843 [virtual-674] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:39.843 [virtual-674] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:39.844 [virtual-674] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-9 unregistered
15:21:39.845 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-16
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

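For reference, the ProducerConfig dump above describes a plain Kafka producer created by the test harness. A minimal sketch of an equivalent producer (topic, key and value are hypothetical; only bootstrap.servers, the serializers, max.block.ms and linger.ms are taken from the dump, everything else being a client default):

    import java.util.Properties
    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

    // Sketch only; mirrors the non-default values in the ProducerConfig dump above.
    val props = new Properties()
    props.put("bootstrap.servers", "localhost:6001")
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("max.block.ms", "10000")
    props.put("linger.ms", "5")

    val producer = new KafkaProducer[String, String](props)
    try producer.send(new ProducerRecord("t5_2", "key", "value")).get() // block until acked
    finally producer.close() // emits the "Closing the Kafka producer ..." INFO lines seen in this log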
15:21:39.845 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:39.846 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-16] Instantiated an idempotent producer.
15:21:39.847 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:39.847 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:39.847 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314099847
15:21:39.850 [kafka-producer-network-thread | producer-16] INFO o.a.k.c.Metadata - [Producer clientId=producer-16] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:39.850 [kafka-producer-network-thread | producer-16] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-16] ProducerId set to 15 with epoch 0
15:21:39.858 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-16] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:21:39.860 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:39.860 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:39.860 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:39.860 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:39.860 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-16 unregistered
15:21:39.862 [virtual-677] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_1-10
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

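The ConsumerConfig dump above corresponds to the settings ox-kafka builds for group g5_1. As a hedged sketch of the corresponding ox API usage (method names per ox's documentation, not verified against this exact version; KafkaFlow.subscribe itself does appear in the stack traces below at KafkaFlow.scala:25/40):

    import ox.supervised
    import ox.kafka.{ConsumerSettings, KafkaFlow}
    import ox.kafka.ConsumerSettings.AutoOffsetReset

    // group.id, bootstrap.servers and auto.offset.reset mirror the dump above;
    // everything else is left at the client defaults it lists.
    val settings = ConsumerSettings
      .default("g5_1")
      .bootstrapServers("localhost:6001")
      .autoOffsetReset(AutoOffsetReset.Earliest)

    supervised {
      KafkaFlow
        .subscribe(settings, "t5_1")
        .take(3)                             // completing the flow ends the scope, interrupting the poll loop
        .runForeach(msg => println(msg.value))
    }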
15:21:39.862 [virtual-677] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:39.865 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:39.865 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:39.865 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314099865
15:21:39.866 [virtual-680] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Subscribed to topic(s): t5_1
15:21:39.869 [virtual-680] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:39.869 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:21:39.870 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] (Re-)joining group
15:21:39.872 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in Empty state. Created a new member id consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a and requesting the member to rejoin with this id.
15:21:39.872 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a
15:21:39.872 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] (Re-)joining group
15:21:39.873 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a joins group g5_1 in Empty state. Adding to the group now.
15:21:39.873 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a).
15:21:42.873 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_1 generation 3 with 1 members.
15:21:42.874 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a', protocol='range'}
15:21:42.874 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Finished assignment for group at generation 3: {consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a=Assignment(partitions=[t5_1-0])}
15:21:42.875 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a for group g5_1 for generation 3. The group has 1 members, 0 of which are static.
15:21:42.881 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a', protocol='range'}
15:21:42.881 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_1-0])
15:21:42.881 [virtual-680] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Adding newly assigned partitions: [t5_1-0]
15:21:42.882 [virtual-680] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t5_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
15:21:42.886 [virtual-677] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_2-11
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_2
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:21:42.886 [virtual-677] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:42.889 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:42.889 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:42.889 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314102889
15:21:42.889 [virtual-684] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Subscribed to topic(s): t5_1
15:21:42.891 [virtual-684] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:42.892 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:21:42.893 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] (Re-)joining group
15:21:42.895 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_2 in Empty state. Created a new member id consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4 and requesting the member to rejoin with this id.
15:21:42.896 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Request joining group due to: need to re-join with the given member-id: consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4
15:21:42.896 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] (Re-)joining group
15:21:42.897 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4 joins group g5_2 in Empty state. Adding to the group now.
15:21:42.897 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4 with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4).
15:21:45.897 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_2 generation 1 with 1 members.
15:21:45.898 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4', protocol='range'}
15:21:45.898 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Finished assignment for group at generation 1: {consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4=Assignment(partitions=[t5_1-0])}
15:21:45.899 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4 for group g5_2 for generation 1. The group has 1 members, 0 of which are static.
15:21:45.905 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4', protocol='range'}
15:21:45.905 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Notifying assignor about the new Assignment(partitions=[t5_1-0])
15:21:45.905 [virtual-684] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Adding newly assigned partitions: [t5_1-0]
15:21:45.906 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Found no committed offset for partition t5_1-0
15:21:45.907 [virtual-684] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Resetting offset for partition t5_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
15:21:45.909 [virtual-683] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:45.909 [virtual-680] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:45.909 [virtual-684] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:45.909 [virtual-679] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
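The InterruptedException/InterruptException pairs above are the expected shutdown path rather than a test failure: when an ox supervised scope ends (here, once the expected records have been consumed), its remaining forks are interrupted, and a virtual thread blocked in KafkaConsumer.poll or CompletableFuture.get surfaces that interrupt, which ox-kafka logs before closing the consumer. A minimal standalone sketch of the mechanism (illustrative, not the project's test code):

    import ox.{fork, supervised}

    supervised {
      fork {
        try
          while true do Thread.sleep(100) // stand-in for the blocking consumer.poll loop
        catch
          case _: InterruptedException => () // expected when the scope shuts the fork down
      }
      // once the scope's body and all user forks complete, remaining forks
      // such as this one are interrupted and joined by ox
    }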
15:21:45.910 [virtual-686] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Revoke previously assigned partitions [t5_1-0]
15:21:45.910 [virtual-686] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Member consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:21:45.910 [virtual-686] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
15:21:45.910 [virtual-686] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
15:21:45.910 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:21:45.910 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g5_1-10-e4d4fec1-b448-45ec-9217-d183511d0c0a) members.).
15:21:45.910 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_1 with generation 4 is now empty.
15:21:45.911 [virtual-687] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Revoke previously assigned partitions [t5_1-0]
15:21:45.911 [virtual-687] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Member consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:21:45.911 [virtual-687] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Resetting generation and member id due to: consumer pro-actively leaving the group
15:21:45.911 [virtual-687] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Request joining group due to: consumer pro-actively leaving the group
15:21:45.912 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_2] Member consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:21:45.912 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g5_2-11-4ec671bb-82aa-4cbb-990a-cc952f1fd8e4) members.).
15:21:45.912 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_2 with generation 2 is now empty.
15:21:46.398 [virtual-686] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:46.399 [virtual-686] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:46.399 [virtual-686] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:46.399 [virtual-686] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:46.401 [virtual-686] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-10 unregistered
15:21:46.410 [virtual-687] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:46.410 [virtual-687] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:46.410 [virtual-687] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:46.410 [virtual-687] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:46.413 [virtual-687] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_2-11 unregistered
15:21:46.415 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-17
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

15:21:46.415 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:46.415 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-17] Instantiated an idempotent producer.
15:21:46.417 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:46.417 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:46.417 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314106417
15:21:46.419 [data-plane-kafka-request-handler-5] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t6_1) to the active controller.
15:21:46.420 [kafka-producer-network-thread | producer-17] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-17] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t6_1=UNKNOWN_TOPIC_OR_PARTITION}
15:21:46.420 [kafka-producer-network-thread | producer-17] INFO o.a.k.c.Metadata - [Producer clientId=producer-17] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:46.420 [kafka-producer-network-thread | producer-17] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-17] ProducerId set to 16 with epoch 0
15:21:46.421 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t6_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
15:21:46.421 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t6_1 with topic ID Z1dFY3WnS7qxPAcadZNkFg.
15:21:46.421 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t6_1-0 with topic ID Z1dFY3WnS7qxPAcadZNkFg and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
15:21:46.447 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
15:21:46.448 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t6_1-0)
15:21:46.448 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t6_1-0 with topic id Z1dFY3WnS7qxPAcadZNkFg.
15:21:46.450 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t6_1-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
15:21:46.450 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t6_1-0 in /tmp/kafka-logs5982689497894266552/t6_1-0 with properties {}
15:21:46.451 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t6_1-0 broker=0] No checkpointed highwatermark is found for partition t6_1-0
15:21:46.451 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t6_1-0 broker=0] Log loaded for partition t6_1-0 with initial high watermark 0
15:21:46.451 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t6_1-0 with topic id Some(Z1dFY3WnS7qxPAcadZNkFg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
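Topic t6_1 above is auto-created on the producer's first metadata request, which is why a recoverable UNKNOWN_TOPIC_OR_PARTITION warning precedes the CreateTopics SUCCESS entry. A hedged sketch of creating the topic explicitly instead, which avoids that warning (standard Kafka Admin API; the broker address and partition/replication counts are taken from the log):

    import java.util.Properties
    import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}
    import scala.jdk.CollectionConverters.*

    val props = new Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    val admin = Admin.create(props)
    try
      // one partition, replication factor 1, as in the CreateTopics result above
      admin.createTopics(List(new NewTopic("t6_1", 1, 1.toShort)).asJava).all().get()
    finally admin.close()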
15:21:47.431 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-17] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:21:47.432 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:47.432 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:47.432 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:47.432 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:47.433 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-17 unregistered
15:21:47.433 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-18
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

15:21:47.433 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:47.433 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-18] Instantiated an idempotent producer.
15:21:47.435 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:47.435 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:47.435 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314107435
15:21:47.438 [kafka-producer-network-thread | producer-18] INFO o.a.k.c.Metadata - [Producer clientId=producer-18] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:47.438 [kafka-producer-network-thread | producer-18] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-18] ProducerId set to 17 with epoch 0
15:21:47.447 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-18] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:21:47.449 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:47.449 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:47.449 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:47.449 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:47.449 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-18 unregistered
15:21:47.449 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-19
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

792615:21:47.450 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
792715:21:47.450 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-19] Instantiated an idempotent producer.
792815:21:47.452 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
792915:21:47.452 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
793015:21:47.452 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314107452
793115:21:47.454 [kafka-producer-network-thread | producer-19] INFO o.a.k.c.Metadata - [Producer clientId=producer-19] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
793215:21:47.455 [kafka-producer-network-thread | producer-19] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-19] ProducerId set to 18 with epoch 0
793315:21:47.504 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-19] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
793415:21:47.507 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
793515:21:47.507 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
793615:21:47.507 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
793715:21:47.507 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
793815:21:47.507 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-19 unregistered
793915:21:47.512 [virtual-693] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
7940 allow.auto.create.topics = true
7941 auto.commit.interval.ms = 5000
7942 auto.offset.reset = earliest
7943 bootstrap.servers = [localhost:6001]
7944 check.crcs = true
7945 client.dns.lookup = use_all_dns_ips
7946 client.id = consumer-g6_1-12
7947 client.rack =
7948 connections.max.idle.ms = 540000
7949 default.api.timeout.ms = 60000
7950 enable.auto.commit = false
7951 enable.metrics.push = true
7952 exclude.internal.topics = true
7953 fetch.max.bytes = 52428800
7954 fetch.max.wait.ms = 500
7955 fetch.min.bytes = 1
7956 group.id = g6_1
7957 group.instance.id = null
7958 group.protocol = classic
7959 group.remote.assignor = null
7960 heartbeat.interval.ms = 3000
7961 interceptor.classes = []
7962 internal.leave.group.on.close = true
7963 internal.throw.on.fetch.stable.offset.unsupported = false
7964 isolation.level = read_uncommitted
7965 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
7966 max.partition.fetch.bytes = 1048576
7967 max.poll.interval.ms = 300000
7968 max.poll.records = 500
7969 metadata.max.age.ms = 300000
7970 metadata.recovery.rebootstrap.trigger.ms = 300000
7971 metadata.recovery.strategy = rebootstrap
7972 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
7973 metrics.num.samples = 2
7974 metrics.recording.level = INFO
7975 metrics.sample.window.ms = 30000
7976 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
7977 receive.buffer.bytes = 65536
7978 reconnect.backoff.max.ms = 1000
7979 reconnect.backoff.ms = 50
7980 request.timeout.ms = 30000
7981 retry.backoff.max.ms = 1000
7982 retry.backoff.ms = 100
7983 sasl.client.callback.handler.class = null
7984 sasl.jaas.config = null
7985 sasl.kerberos.kinit.cmd = /usr/bin/kinit
7986 sasl.kerberos.min.time.before.relogin = 60000
7987 sasl.kerberos.service.name = null
7988 sasl.kerberos.ticket.renew.jitter = 0.05
7989 sasl.kerberos.ticket.renew.window.factor = 0.8
7990 sasl.login.callback.handler.class = null
7991 sasl.login.class = null
7992 sasl.login.connect.timeout.ms = null
7993 sasl.login.read.timeout.ms = null
7994 sasl.login.refresh.buffer.seconds = 300
7995 sasl.login.refresh.min.period.seconds = 60
7996 sasl.login.refresh.window.factor = 0.8
7997 sasl.login.refresh.window.jitter = 0.05
7998 sasl.login.retry.backoff.max.ms = 10000
7999 sasl.login.retry.backoff.ms = 100
8000 sasl.mechanism = GSSAPI
8001 sasl.oauthbearer.assertion.algorithm = RS256
8002 sasl.oauthbearer.assertion.claim.aud = null
8003 sasl.oauthbearer.assertion.claim.exp.seconds = 300
8004 sasl.oauthbearer.assertion.claim.iss = null
8005 sasl.oauthbearer.assertion.claim.jti.include = false
8006 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
8007 sasl.oauthbearer.assertion.claim.sub = null
8008 sasl.oauthbearer.assertion.file = null
8009 sasl.oauthbearer.assertion.private.key.file = null
8010 sasl.oauthbearer.assertion.private.key.passphrase = null
8011 sasl.oauthbearer.assertion.template.file = null
8012 sasl.oauthbearer.client.credentials.client.id = null
8013 sasl.oauthbearer.client.credentials.client.secret = null
8014 sasl.oauthbearer.clock.skew.seconds = 30
8015 sasl.oauthbearer.expected.audience = null
8016 sasl.oauthbearer.expected.issuer = null
8017 sasl.oauthbearer.header.urlencode = false
8018 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
8019 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
8020 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
8021 sasl.oauthbearer.jwks.endpoint.url = null
8022 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
8023 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
8024 sasl.oauthbearer.scope = null
8025 sasl.oauthbearer.scope.claim.name = scope
8026 sasl.oauthbearer.sub.claim.name = sub
8027 sasl.oauthbearer.token.endpoint.url = null
8028 security.protocol = PLAINTEXT
8029 security.providers = null
8030 send.buffer.bytes = 131072
8031 session.timeout.ms = 45000
8032 share.acknowledgement.mode = implicit
8033 socket.connection.setup.timeout.max.ms = 30000
8034 socket.connection.setup.timeout.ms = 10000
8035 ssl.cipher.suites = null
8036 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
8037 ssl.endpoint.identification.algorithm = https
8038 ssl.engine.factory.class = null
8039 ssl.key.password = null
8040 ssl.keymanager.algorithm = SunX509
8041 ssl.keystore.certificate.chain = null
8042 ssl.keystore.key = null
8043 ssl.keystore.location = null
8044 ssl.keystore.password = null
8045 ssl.keystore.type = JKS
8046 ssl.protocol = TLSv1.3
8047 ssl.provider = null
8048 ssl.secure.random.implementation = null
8049 ssl.trustmanager.algorithm = PKIX
8050 ssl.truststore.certificates = null
8051 ssl.truststore.location = null
8052 ssl.truststore.password = null
8053 ssl.truststore.type = JKS
8054 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
8055
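----
The ConsumerConfig dump above (group g6_1, earliest offset reset, auto-commit disabled, String deserializers) would be produced by a consumer configured roughly as follows. Again a sketch against the plain Kafka client API, mirroring only the non-default values; helper names are hypothetical.

import java.time.Duration
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer

// Hypothetical helper mirroring the dumped (non-default) ConsumerConfig values.
def newTestConsumer(groupId: String): KafkaConsumer[String, String] =
  val props = new Properties()
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId)
  props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
  props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
  new KafkaConsumer[String, String](props)

// Usage: subscribe and poll once, matching the "Subscribed to topic(s): t6_1" line.
@main def pollOnce(): Unit =
  val consumer = newTestConsumer("g6_1")
  try
    consumer.subscribe(java.util.List.of("t6_1"))
    val records = consumer.poll(Duration.ofMillis(500))
    records.forEach(r => println(s"${r.key} -> ${r.value}"))
  finally consumer.close()
----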
15:21:47.513 [virtual-693] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:47.519 [virtual-693] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:47.519 [virtual-693] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:47.522 [virtual-693] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314107519
15:21:47.528 [virtual-694] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Subscribed to topic(s): t6_1
15:21:47.532 [virtual-694] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:47.533 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:21:47.534 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] (Re-)joining group
15:21:47.537 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_1 in Empty state. Created a new member id consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401 and requesting the member to rejoin with this id.
15:21:47.538 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Request joining group due to: need to re-join with the given member-id: consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401
15:21:47.539 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] (Re-)joining group
15:21:47.540 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401 joins group g6_1 in Empty state. Adding to the group now.
15:21:47.540 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401).
15:21:50.540 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_1 generation 1 with 1 members.
15:21:50.541 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401', protocol='range'}
15:21:50.541 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Finished assignment for group at generation 1: {consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401=Assignment(partitions=[t6_1-0])}
15:21:50.542 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401 for group g6_1 for generation 1. The group has 1 members, 0 of which are static.
15:21:50.548 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401', protocol='range'}
15:21:50.549 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Notifying assignor about the new Assignment(partitions=[t6_1-0])
15:21:50.549 [virtual-694] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Adding newly assigned partitions: [t6_1-0]
15:21:50.550 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Found no committed offset for partition t6_1-0
15:21:50.553 [virtual-694] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Resetting offset for partition t6_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
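----
The join → sync → assign → offset-reset sequence above is a classic-protocol rebalance. Client-side, the "Adding newly assigned partitions" / "Revoke previously assigned partitions" lines are emitted around ConsumerRebalanceListener callbacks, which application code can hook into. A sketch (plain Kafka client API; newTestConsumer is the hypothetical helper from the earlier sketch):

import java.util
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
import org.apache.kafka.common.TopicPartition

@main def rebalanceDemo(): Unit =
  val consumer = newTestConsumer("g6_1") // hypothetical helper from the earlier sketch
  consumer.subscribe(
    java.util.List.of("t6_1"),
    new ConsumerRebalanceListener {
      def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit =
        println(s"assigned: $partitions") // cf. "Adding newly assigned partitions: [t6_1-0]"
      def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit =
        println(s"revoked: $partitions") // cf. "Revoke previously assigned partitions [t6_1-0]"
    }
  )
  consumer.poll(java.time.Duration.ofSeconds(5)) // the listener fires inside poll()
  consumer.close()
----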
15:21:52.556 [virtual-696] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
    at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
    at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
    at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
    at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
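----
The trace runs through ox.kafka.KafkaFlow.subscribe, which hides the consumer behind an ox actor (hence the ActorRef.ask frames) and polls it from a forked virtual thread. In user code the entry point looks roughly like this; the sketch follows the ox-kafka documentation, and the settings-builder method names should be treated as assumptions rather than verified API:

import ox.kafka.{ConsumerSettings, KafkaFlow}
import ox.kafka.ConsumerSettings.AutoOffsetReset

@main def kafkaFlowDemo(): Unit =
  // Assumed settings-builder names, mirroring the dumped ConsumerConfig
  // (group g6_1, bootstrap localhost:6001, earliest reset).
  val settings = ConsumerSettings
    .default("g6_1")
    .bootstrapServers("localhost:6001")
    .autoOffsetReset(AutoOffsetReset.Earliest)
  // subscribe returns a Flow of received messages; polling happens on a forked
  // virtual thread behind an actor, as the frames above show.
  KafkaFlow
    .subscribe(settings, "t6_1")
    .take(3)
    .runForeach(msg => println(s"${msg.key} -> ${msg.value}"))
----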
15:21:52.556 [virtual-694] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
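----
Both ERROR entries above are shutdown noise rather than test failures: when an ox scope ends, still-running forks are cancelled by interrupting their virtual threads, and a blocking KafkaConsumer.poll surfaces that as InterruptException. A minimal sketch of the mechanism, using only ox's core API (Thread.sleep stands in for the blocking poll):

import ox.*

@main def interruptionDemo(): Unit =
  supervised {
    fork {
      // Stands in for the blocking KafkaConsumer.poll(...) call in the trace above.
      try Thread.sleep(60_000)
      catch case e: InterruptedException => println(s"fork interrupted: $e")
    }
    // The scope body ends immediately; daemon forks are then interrupted,
    // which is why the test's consumers log InterruptException while closing.
  }
----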
15:21:52.557 [virtual-701] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Revoke previously assigned partitions [t6_1-0]
15:21:52.557 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Member consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:21:52.558 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Resetting generation and member id due to: consumer pro-actively leaving the group
15:21:52.558 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Request joining group due to: consumer pro-actively leaving the group
15:21:52.558 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_1] Member consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:21:52.558 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g6_1-12-6cfa5c5a-8388-447d-8846-d1bb37c38401) members.).
15:21:52.558 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_1 with generation 2 is now empty.
15:21:52.568 [virtual-701] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:52.568 [virtual-701] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:52.568 [virtual-701] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:52.568 [virtual-701] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:52.570 [virtual-701] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_1-12 unregistered
15:21:52.571 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-20
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

15:21:52.571 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:52.571 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-20] Instantiated an idempotent producer.
15:21:52.573 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:52.573 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:52.573 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314112573
15:21:52.576 [kafka-producer-network-thread | producer-20] INFO o.a.k.c.Metadata - [Producer clientId=producer-20] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:52.576 [kafka-producer-network-thread | producer-20] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-20] ProducerId set to 19 with epoch 0
15:21:52.584 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-20] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:21:52.585 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:21:52.585 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:21:52.585 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:21:52.585 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:21:52.585 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-20 unregistered
15:21:52.587 [virtual-703] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g6_1-13
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g6_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:21:52.587 [virtual-703] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:52.590 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:52.590 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:52.590 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314112590
15:21:52.591 [virtual-706] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Subscribed to topic(s): t6_1
15:21:52.593 [virtual-706] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:52.594 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:21:52.594 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] (Re-)joining group
15:21:52.596 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_1 in Empty state. Created a new member id consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9 and requesting the member to rejoin with this id.
15:21:52.596 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Request joining group due to: need to re-join with the given member-id: consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9
15:21:52.596 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] (Re-)joining group
15:21:52.597 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9 joins group g6_1 in Empty state. Adding to the group now.
15:21:52.597 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9).
15:21:54.910 [quorum-controller-0-event-handler] INFO o.a.k.c.EventPerformanceMonitor - [QuorumController id=0] In the last 60000 ms period, 352 controller events were completed, which took an average of 9.96 ms each. The slowest event was completeActivation[1](143821949), which took 37.77 ms.
15:21:55.598 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_1 generation 3 with 1 members.
15:21:55.599 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9', protocol='range'}
15:21:55.599 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Finished assignment for group at generation 3: {consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9=Assignment(partitions=[t6_1-0])}
15:21:55.600 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9 for group g6_1 for generation 3. The group has 1 members, 0 of which are static.
15:21:55.606 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9', protocol='range'}
15:21:55.606 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Notifying assignor about the new Assignment(partitions=[t6_1-0])
15:21:55.607 [virtual-706] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Adding newly assigned partitions: [t6_1-0]
15:21:55.608 [virtual-706] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t6_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
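----
Unlike the first consumer in g6_1, which found no committed offset and reset to 0, this one resumes from committed offset 3: with enable.auto.commit=false, positions persist only via explicit commits. A sketch of such a commit (plain Kafka API; the offset value is illustrative):

import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition

// Sketch: after processing records 0..2 of t6_1-0, committing position 3 yields the
// FetchPosition{offset=3, ...} that the new group member resumes from above.
@main def commitDemo(): Unit =
  val consumer = newTestConsumer("g6_1") // hypothetical helper from the earlier sketch
  consumer.commitSync(java.util.Map.of(new TopicPartition("t6_1", 0), new OffsetAndMetadata(3L)))
  consumer.close()
----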
15:21:55.611 [virtual-703] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g6_2-14
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g6_2
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:21:55.611 [virtual-703] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:55.613 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:55.613 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:55.613 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314115613
15:21:55.614 [virtual-710] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Subscribed to topic(s): t6_1
15:21:55.617 [virtual-710] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:55.617 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:21:55.618 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] (Re-)joining group
15:21:55.620 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_2 in Empty state. Created a new member id consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750 and requesting the member to rejoin with this id.
15:21:55.620 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Request joining group due to: need to re-join with the given member-id: consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750
15:21:55.620 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] (Re-)joining group
15:21:55.621 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750 joins group g6_2 in Empty state. Adding to the group now.
15:21:55.621 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750).
15:21:58.622 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_2 generation 1 with 1 members.
15:21:58.623 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750', protocol='range'}
15:21:58.623 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Finished assignment for group at generation 1: {consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750=Assignment(partitions=[t6_1-0])}
15:21:58.623 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750 for group g6_2 for generation 1. The group has 1 members, 0 of which are static.
15:21:58.630 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750', protocol='range'}
15:21:58.630 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Notifying assignor about the new Assignment(partitions=[t6_1-0])
15:21:58.630 [virtual-710] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Adding newly assigned partitions: [t6_1-0]
15:21:58.631 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Found no committed offset for partition t6_1-0
15:21:58.633 [virtual-710] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Resetting offset for partition t6_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
15:21:58.635 [virtual-710] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:58.635 [virtual-706] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:58.635 [virtual-705] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:58.635 [virtual-709] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:21:58.636 [virtual-713] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Revoke previously assigned partitions [t6_1-0]
15:21:58.636 [virtual-712] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Revoke previously assigned partitions [t6_1-0]
15:21:58.636 [virtual-713] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Member consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:21:58.636 [virtual-712] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Member consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:21:58.636 [virtual-712] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Resetting generation and member id due to: consumer pro-actively leaving the group
15:21:58.636 [virtual-712] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Request joining group due to: consumer pro-actively leaving the group
15:21:58.636 [virtual-713] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Resetting generation and member id due to: consumer pro-actively leaving the group
15:21:58.636 [virtual-713] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Request joining group due to: consumer pro-actively leaving the group
15:21:58.637 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_2] Member consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:21:58.637 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g6_2-14-de8aba5c-7031-41c5-81b9-39ced5fe5750) members.).
15:21:58.637 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_2 with generation 2 is now empty.
15:21:58.637 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_1] Member consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:21:58.637 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g6_1-13-9192b568-76a9-430c-823a-0afa144ebbf9) members.).
15:21:58.638 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_1 with generation 4 is now empty.
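----
The LeaveGroup sequence above is driven by KafkaConsumer.close(), invoked from a separate virtual thread after the poll was interrupted. For comparison, the Kafka-recommended way to abort a blocking poll without interruption is wakeup(); a sketch against the plain client API:

import org.apache.kafka.common.errors.WakeupException

// Sketch: wakeup()-based shutdown; poll throws WakeupException instead of
// InterruptException, after which close() sends the LeaveGroup request.
@main def wakeupDemo(): Unit =
  val consumer = newTestConsumer("g6_2") // hypothetical helper from the earlier sketch
  val poller = new Thread(() => {
    consumer.subscribe(java.util.List.of("t6_1"))
    try while true do consumer.poll(java.time.Duration.ofMillis(500))
    catch { case _: WakeupException => () } // expected: thrown by the in-flight poll
    finally consumer.close() // triggers the LeaveGroup / "group is now empty" lines above
  })
  poller.start()
  Thread.sleep(2000)
  consumer.wakeup() // the one KafkaConsumer method that is safe to call from another thread
  poller.join()
----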
862915:21:59.122 [virtual-713] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
863015:21:59.122 [virtual-713] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
863115:21:59.122 [virtual-713] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
863215:21:59.122 [virtual-713] INFO o.a.k.c.m.Metrics - Metrics reporters closed
863315:21:59.124 [virtual-713] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_1-13 unregistered
863415:21:59.136 [virtual-712] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
863515:21:59.136 [virtual-712] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
863615:21:59.136 [virtual-712] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
863715:21:59.136 [virtual-712] INFO o.a.k.c.m.Metrics - Metrics reporters closed
863815:21:59.138 [virtual-712] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_2-14 unregistered
863915:21:59.139 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-21
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

15:21:59.139 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:21:59.140 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-21] Instantiated an idempotent producer.
15:21:59.141 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:21:59.141 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:21:59.141 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314119141
15:21:59.143 [data-plane-kafka-request-handler-3] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t7_1) to the active controller.
15:21:59.145 [kafka-producer-network-thread | producer-21] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-21] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t7_1=UNKNOWN_TOPIC_OR_PARTITION}
15:21:59.145 [kafka-producer-network-thread | producer-21] INFO o.a.k.c.Metadata - [Producer clientId=producer-21] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:21:59.145 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t7_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
15:21:59.145 [kafka-producer-network-thread | producer-21] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-21] ProducerId set to 20 with epoch 0
15:21:59.145 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t7_1 with topic ID reBHK77oTTOVa6OEjaY2Yw.
15:21:59.145 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t7_1-0 with topic ID reBHK77oTTOVa6OEjaY2Yw and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
15:21:59.171 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
15:21:59.171 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t7_1-0)
15:21:59.171 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t7_1-0 with topic id reBHK77oTTOVa6OEjaY2Yw.
15:21:59.173 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t7_1-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
15:21:59.173 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t7_1-0 in /tmp/kafka-logs5982689497894266552/t7_1-0 with properties {}
15:21:59.174 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t7_1-0 broker=0] No checkpointed highwatermark is found for partition t7_1-0
15:21:59.174 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t7_1-0 broker=0] Log loaded for partition t7_1-0 with initial high watermark 0
15:21:59.174 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t7_1-0 with topic id Some(reBHK77oTTOVa6OEjaY2Yw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
15:22:00.155 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-21] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:22:00.156 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:00.156 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:00.156 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:00.156 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:00.156 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-21 unregistered
15:22:00.157 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-22
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

15:22:00.157 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:00.158 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-22] Instantiated an idempotent producer.
15:22:00.159 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:00.159 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:00.159 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314120159
15:22:00.161 [kafka-producer-network-thread | producer-22] INFO o.a.k.c.Metadata - [Producer clientId=producer-22] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:00.161 [kafka-producer-network-thread | producer-22] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-22] ProducerId set to 21 with epoch 0
15:22:00.169 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-22] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:22:00.170 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:00.170 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:00.170 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:00.170 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:00.170 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-22 unregistered
15:22:00.171 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-23
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

15:22:00.171 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:00.171 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-23] Instantiated an idempotent producer.
15:22:00.172 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:00.172 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:00.172 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314120172
15:22:00.175 [kafka-producer-network-thread | producer-23] INFO o.a.k.c.Metadata - [Producer clientId=producer-23] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:00.175 [kafka-producer-network-thread | producer-23] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-23] ProducerId set to 22 with epoch 0
15:22:00.182 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-23] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:22:00.184 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:00.184 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:00.184 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:00.184 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:00.184 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-23 unregistered
15:22:00.186 [virtual-719] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g7_1-15
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g7_1
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:22:00.186 [virtual-719] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:00.188 [virtual-719] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:00.188 [virtual-719] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:00.188 [virtual-719] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314120188
15:22:00.188 [virtual-720] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Subscribed to topic(s): t7_1
15:22:00.190 [virtual-720] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:00.191 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:22:00.191 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] (Re-)joining group
15:22:00.193 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_1 in Empty state. Created a new member id consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1 and requesting the member to rejoin with this id.
15:22:00.193 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Request joining group due to: need to re-join with the given member-id: consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1
15:22:00.193 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] (Re-)joining group
15:22:00.194 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1 joins group g7_1 in Empty state. Adding to the group now.
15:22:00.194 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1 with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1).
15:22:03.193 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_1 generation 1 with 1 members.
15:22:03.194 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1', protocol='range'}
15:22:03.194 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Finished assignment for group at generation 1: {consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1=Assignment(partitions=[t7_1-0])}
15:22:03.195 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1 for group g7_1 for generation 1. The group has 1 members, 0 of which are static.
15:22:03.201 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1', protocol='range'}
15:22:03.201 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Notifying assignor about the new Assignment(partitions=[t7_1-0])
15:22:03.201 [virtual-720] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Adding newly assigned partitions: [t7_1-0]
15:22:03.202 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Found no committed offset for partition t7_1-0
15:22:03.204 [virtual-720] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Resetting offset for partition t7_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
15:22:05.207 [virtual-722] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
	at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
	at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
	at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
	at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
	at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
	at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:22:05.207 [virtual-720] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:22:05.207 [virtual-727] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Revoke previously assigned partitions [t7_1-0]
15:22:05.208 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Member consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:22:05.208 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Resetting generation and member id due to: consumer pro-actively leaving the group
15:22:05.208 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Request joining group due to: consumer pro-actively leaving the group
15:22:05.208 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_1] Member consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:22:05.208 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g7_1-15-3d123d14-c173-4bd5-8194-c9dc792c7be1) members.).
15:22:05.208 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_1 with generation 2 is now empty.
15:22:05.218 [virtual-727] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:05.218 [virtual-727] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:05.218 [virtual-727] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:05.218 [virtual-727] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:05.219 [virtual-727] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_1-15 unregistered
15:22:05.220 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-24
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

15:22:05.220 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:05.220 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-24] Instantiated an idempotent producer.
15:22:05.222 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:05.222 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:05.222 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314125222
15:22:05.226 [kafka-producer-network-thread | producer-24] INFO o.a.k.c.Metadata - [Producer clientId=producer-24] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:05.226 [kafka-producer-network-thread | producer-24] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-24] ProducerId set to 23 with epoch 0
15:22:05.233 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-24] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:22:05.235 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:05.235 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:05.235 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:05.235 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:05.235 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-24 unregistered
15:22:05.236 [virtual-729] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g7_1-16
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g7_1
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:22:05.236 [virtual-729] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:05.239 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:05.239 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:05.239 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314125239
15:22:05.240 [virtual-732] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Subscribed to topic(s): t7_1
15:22:05.242 [virtual-732] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:05.243 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:22:05.244 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] (Re-)joining group
15:22:05.245 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_1 in Empty state. Created a new member id consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802 and requesting the member to rejoin with this id.
15:22:05.245 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Request joining group due to: need to re-join with the given member-id: consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802
15:22:05.246 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] (Re-)joining group
15:22:05.246 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802 joins group g7_1 in Empty state. Adding to the group now.
15:22:05.246 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802 with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802).
15:22:08.247 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_1 generation 3 with 1 members.
15:22:08.247 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802', protocol='range'}
15:22:08.248 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Finished assignment for group at generation 3: {consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802=Assignment(partitions=[t7_1-0])}
15:22:08.248 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802 for group g7_1 for generation 3. The group has 1 members, 0 of which are static.
15:22:08.254 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802', protocol='range'}
15:22:08.254 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Notifying assignor about the new Assignment(partitions=[t7_1-0])
15:22:08.254 [virtual-732] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Adding newly assigned partitions: [t7_1-0]
15:22:08.255 [virtual-732] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t7_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
15:22:08.259 [virtual-729] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g7_2-17
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g7_2
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:22:08.259 [virtual-729] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:08.261 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:08.261 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:08.261 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314128261
15:22:08.262 [virtual-736] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Subscribed to topic(s): t7_1
15:22:08.265 [virtual-736] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:08.265 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:22:08.265 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] (Re-)joining group
15:22:08.268 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_2 in Empty state. Created a new member id consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511 and requesting the member to rejoin with this id.
15:22:08.268 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Request joining group due to: need to re-join with the given member-id: consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511
15:22:08.268 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] (Re-)joining group
15:22:08.269 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511 joins group g7_2 in Empty state. Adding to the group now.
15:22:08.269 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511 with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511).
15:22:11.269 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_2 generation 1 with 1 members.
15:22:11.270 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511', protocol='range'}
15:22:11.270 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Finished assignment for group at generation 1: {consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511=Assignment(partitions=[t7_1-0])}
15:22:11.271 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511 for group g7_2 for generation 1. The group has 1 members, 0 of which are static.
15:22:11.277 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511', protocol='range'}
15:22:11.277 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Notifying assignor about the new Assignment(partitions=[t7_1-0])
15:22:11.277 [virtual-736] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Adding newly assigned partitions: [t7_1-0]
15:22:11.278 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Found no committed offset for partition t7_1-0
15:22:11.280 [virtual-736] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Resetting offset for partition t7_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
15:22:11.282 [virtual-731] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.supervised$package$.$anonfun$2(supervised.scala:53)
	at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
	at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:22:11.282 [virtual-735] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.supervised$package$.$anonfun$2(supervised.scala:53)
	at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
	at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:22:11.282 [virtual-732] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:22:11.282 [virtual-736] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
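
The four ERROR entries above appear to be shutdown noise rather than test failures: the stack traces show that ox runs each poll loop on a virtual thread inside a supervised scope, and when the scope ends those threads are interrupted, which surfaces as an InterruptedException from the blocking KafkaConsumer.poll. A minimal sketch of that mechanism, assuming only ox's public supervised/fork API (the method name interruptionDemo and the sleep durations are illustrative, not taken from this build):

  import ox.{fork, supervised}

  @main def interruptionDemo(): Unit =
    supervised {
      fork { // daemon fork: interrupted once the scope's main body completes
        try Thread.sleep(10_000) // stands in for the blocking consumer poll
        catch case _: InterruptedException => println("poll loop interrupted")
      }
      Thread.sleep(100) // the scope's main body finishes first
    } // on scope end, still-running daemon forks are interrupted
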
15:22:11.283 [virtual-739] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Revoke previously assigned partitions [t7_1-0]
15:22:11.283 [virtual-739] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Member consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:22:11.284 [virtual-739] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Resetting generation and member id due to: consumer pro-actively leaving the group
15:22:11.284 [virtual-739] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Request joining group due to: consumer pro-actively leaving the group
15:22:11.284 [virtual-738] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Revoke previously assigned partitions [t7_1-0]
15:22:11.284 [virtual-738] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Member consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:22:11.284 [virtual-738] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Resetting generation and member id due to: consumer pro-actively leaving the group
15:22:11.285 [virtual-738] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Request joining group due to: consumer pro-actively leaving the group
15:22:11.286 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_2] Member consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:22:11.286 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g7_2-17-3f04f026-cbd7-49f7-96f1-bf318538f511) members.).
15:22:11.286 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_2 with generation 2 is now empty.
15:22:11.286 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_1] Member consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:22:11.286 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g7_1-16-ecdf99bf-4013-4b53-8bb9-bc60ff043802) members.).
15:22:11.286 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_1 with generation 4 is now empty.
15:22:11.769 [virtual-738] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:11.769 [virtual-738] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:11.769 [virtual-738] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:11.769 [virtual-738] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:11.771 [virtual-738] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_1-16 unregistered
15:22:11.783 [virtual-739] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:11.783 [virtual-739] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:11.783 [virtual-739] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:11.783 [virtual-739] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:11.784 [virtual-739] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_2-17 unregistered
15:22:11.786 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-25
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

15:22:11.786 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:11.786 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-25] Instantiated an idempotent producer.
15:22:11.788 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:11.788 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:11.788 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314131788
15:22:11.790 [data-plane-kafka-request-handler-2] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t8_1) to the active controller.
15:22:11.790 [kafka-producer-network-thread | producer-25] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-25] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t8_1=UNKNOWN_TOPIC_OR_PARTITION}
15:22:11.790 [kafka-producer-network-thread | producer-25] INFO o.a.k.c.Metadata - [Producer clientId=producer-25] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:11.791 [kafka-producer-network-thread | producer-25] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-25] ProducerId set to 24 with epoch 0
15:22:11.792 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t8_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
15:22:11.792 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t8_1 with topic ID 4waYU3nHQoS8nvBTQs1BWg.
15:22:11.792 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t8_1-0 with topic ID 4waYU3nHQoS8nvBTQs1BWg and PartitionRegistration(replicas=[0], directories=[bfxY4JLWDJqXknmblZ20cw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
15:22:11.818 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
15:22:11.818 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t8_1-0)
15:22:11.818 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t8_1-0 with topic id 4waYU3nHQoS8nvBTQs1BWg.
15:22:11.820 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t8_1-0, dir=/tmp/kafka-logs5982689497894266552] Loading producer state till offset 0
15:22:11.821 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t8_1-0 in /tmp/kafka-logs5982689497894266552/t8_1-0 with properties {}
15:22:11.821 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t8_1-0 broker=0] No checkpointed highwatermark is found for partition t8_1-0
15:22:11.821 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t8_1-0 broker=0] Log loaded for partition t8_1-0 with initial high watermark 0
15:22:11.821 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t8_1-0 with topic id Some(4waYU3nHQoS8nvBTQs1BWg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
15:22:12.801 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-25] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:22:12.803 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:12.803 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:12.803 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:12.803 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:12.804 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-25 unregistered
15:22:12.804 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	(values identical to producer-25 above, except client.id = producer-26)

15:22:12.804 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:12.804 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-26] Instantiated an idempotent producer.
15:22:12.806 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:12.806 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:12.806 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314132806
15:22:12.808 [kafka-producer-network-thread | producer-26] INFO o.a.k.c.Metadata - [Producer clientId=producer-26] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:12.809 [kafka-producer-network-thread | producer-26] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-26] ProducerId set to 25 with epoch 0
15:22:12.817 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-26] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:22:12.818 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:12.819 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:12.819 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:12.819 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:12.819 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-26 unregistered
15:22:12.820 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	(values identical to producer-25 above, except client.id = producer-27)

15:22:12.820 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:12.820 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-27] Instantiated an idempotent producer.
15:22:12.823 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:12.823 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:12.823 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314132823
15:22:12.826 [kafka-producer-network-thread | producer-27] INFO o.a.k.c.Metadata - [Producer clientId=producer-27] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:12.826 [kafka-producer-network-thread | producer-27] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-27] ProducerId set to 26 with epoch 0
15:22:12.835 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-27] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:22:12.836 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:12.836 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:12.836 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:12.836 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:12.836 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-27 unregistered
15:22:12.837 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	(values identical to producer-25 above, except client.id = producer-28)

15:22:12.838 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:12.838 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-28] Instantiated an idempotent producer.
15:22:12.841 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:12.841 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:12.841 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314132841
15:22:12.843 [kafka-producer-network-thread | producer-28] INFO o.a.k.c.Metadata - [Producer clientId=producer-28] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:12.844 [kafka-producer-network-thread | producer-28] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-28] ProducerId set to 27 with epoch 0
15:22:12.853 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-28] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:22:12.855 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:12.855 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:12.855 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:12.855 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:12.855 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-28 unregistered
15:22:12.855 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	(values identical to producer-25 above, except client.id = producer-29)

15:22:12.855 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:12.856 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-29] Instantiated an idempotent producer.
15:22:12.858 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:12.858 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:12.858 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314132858
15:22:12.863 [kafka-producer-network-thread | producer-29] INFO o.a.k.c.Metadata - [Producer clientId=producer-29] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:12.863 [kafka-producer-network-thread | producer-29] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-29] ProducerId set to 28 with epoch 0
15:22:12.872 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-29] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
15:22:12.874 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:12.874 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:12.874 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:12.874 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:12.874 [pool-67-thread-3-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-29 unregistered
15:22:12.875 [virtual-745] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g8_1-18
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g8_1
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
10475 socket.connection.setup.timeout.ms = 10000
10476 ssl.cipher.suites = null
10477 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
10478 ssl.endpoint.identification.algorithm = https
10479 ssl.engine.factory.class = null
10480 ssl.key.password = null
10481 ssl.keymanager.algorithm = SunX509
10482 ssl.keystore.certificate.chain = null
10483 ssl.keystore.key = null
10484 ssl.keystore.location = null
10485 ssl.keystore.password = null
10486 ssl.keystore.type = JKS
10487 ssl.protocol = TLSv1.3
10488 ssl.provider = null
10489 ssl.secure.random.implementation = null
10490 ssl.trustmanager.algorithm = PKIX
10491 ssl.truststore.certificates = null
10492 ssl.truststore.location = null
10493 ssl.truststore.password = null
10494 ssl.truststore.type = JKS
10495 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
10496
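Stripped of defaults, the ConsumerConfig dump above amounts to four settings plus the deserializers (the client.id consumer-g8_1-18 is the client's auto-generated value). A sketch of building the equivalent consumer, using only values visible in the dump:

import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer

@main def consumerSketch(): Unit =
  val props = new Properties()
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  props.put(ConsumerConfig.GROUP_ID_CONFIG, "g8_1")
  props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // start from offset 0 when nothing is committed
  props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")   // offsets must then be committed explicitly
  val consumer = new KafkaConsumer[String, String](props, new StringDeserializer, new StringDeserializer)
  consumer.close()
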
15:22:12.875 [virtual-745] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:12.877 [virtual-745] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:12.877 [virtual-745] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:12.877 [virtual-745] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314132877
15:22:12.879 [virtual-746] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Subscribed to topic(s): t8_1
15:22:12.881 [virtual-746] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:12.881 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:22:12.882 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] (Re-)joining group
15:22:12.883 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_1 in Empty state. Created a new member id consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c and requesting the member to rejoin with this id.
15:22:12.883 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Request joining group due to: need to re-join with the given member-id: consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c
15:22:12.883 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] (Re-)joining group
15:22:12.884 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c joins group g8_1 in Empty state. Adding to the group now.
15:22:12.884 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c).
15:22:15.884 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_1 generation 1 with 1 members.
15:22:15.884 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c', protocol='range'}
15:22:15.885 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Finished assignment for group at generation 1: {consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c=Assignment(partitions=[t8_1-0])}
15:22:15.886 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c for group g8_1 for generation 1. The group has 1 members, 0 of which are static.
15:22:15.892 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c', protocol='range'}
15:22:15.892 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Notifying assignor about the new Assignment(partitions=[t8_1-0])
15:22:15.892 [virtual-746] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Adding newly assigned partitions: [t8_1-0]
15:22:15.893 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Found no committed offset for partition t8_1-0
15:22:15.895 [virtual-746] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Resetting offset for partition t8_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
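
The sequence above is the classic group protocol in one pass: the coordinator issues a member id, stabilizes generation 1, the RangeAssignor hands the sole member t8_1-0, and with no committed offset the position is reset to 0 (auto.offset.reset = earliest). The "Adding newly assigned partitions" / "Revoke previously assigned partitions" lines bracket the consumer's rebalance callbacks; a sketch of hooking them (the listener name is illustrative, not from the test):

import java.util
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
import org.apache.kafka.common.TopicPartition
import scala.jdk.CollectionConverters.*

class LoggingRebalanceListener extends ConsumerRebalanceListener:
  override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit =
    println(s"assigned: ${partitions.asScala.mkString(", ")}")
  override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit =
    println(s"revoked: ${partitions.asScala.mkString(", ")}")

// usage: consumer.subscribe(util.List.of("t8_1"), LoggingRebalanceListener())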
15:22:15.897 [virtual-748] ERROR o.k.KafkaFlow$ - Exception when polling for records
ox.flow.FlowOps$$anon$1: abort take
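
This ERROR is expected here rather than a failure: in ox, take(n) completes a flow early by aborting the upstream emitter with an internal control exception (the anonymous FlowOps$$anon$1 above), which the Kafka polling fork logs while shutting down. A Kafka-free sketch of the same mechanism, assuming ox's flow API as exercised by this test:

import ox.flow.Flow

@main def takeSketch(): Unit =
  // take(3) stops the upstream after three elements; the emitting side is
  // terminated via the same kind of control exception logged above.
  val firstThree = Flow.fromValues(1, 2, 3, 4, 5).take(3).runToList()
  println(firstThree) // List(1, 2, 3)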
15:22:15.904 [virtual-753] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Revoke previously assigned partitions [t8_1-0]
15:22:15.904 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Member consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:22:15.904 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Resetting generation and member id due to: consumer pro-actively leaving the group
15:22:15.904 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Request joining group due to: consumer pro-actively leaving the group
15:22:15.905 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_1] Member consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:22:15.905 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g8_1-18-bc176f43-ee98-4bff-8131-a3adc75daf6c) members.).
15:22:15.905 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_1 with generation 2 is now empty.
15:22:16.398 [virtual-753] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:16.398 [virtual-753] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:16.398 [virtual-753] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:16.398 [virtual-753] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:16.399 [virtual-753] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_1-18 unregistered
15:22:16.400 [virtual-754] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_1-19
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:22:16.401 [virtual-754] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:16.402 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:16.402 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:16.402 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314136402
15:22:16.403 [virtual-757] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Subscribed to topic(s): t8_1
15:22:16.405 [virtual-757] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:16.406 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:22:16.406 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] (Re-)joining group
15:22:16.408 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_1 in Empty state. Created a new member id consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2 and requesting the member to rejoin with this id.
15:22:16.409 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Request joining group due to: need to re-join with the given member-id: consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2
15:22:16.409 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] (Re-)joining group
15:22:16.409 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2 joins group g8_1 in Empty state. Adding to the group now.
15:22:16.409 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2 with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2).
15:22:19.410 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_1 generation 3 with 1 members.
15:22:19.410 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2', protocol='range'}
15:22:19.411 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Finished assignment for group at generation 3: {consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2=Assignment(partitions=[t8_1-0])}
15:22:19.411 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2 for group g8_1 for generation 3. The group has 1 members, 0 of which are static.
15:22:19.417 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2', protocol='range'}
15:22:19.417 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Notifying assignor about the new Assignment(partitions=[t8_1-0])
15:22:19.417 [virtual-757] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Adding newly assigned partitions: [t8_1-0]
15:22:19.418 [virtual-757] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t8_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
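
Unlike its predecessor, consumer-g8_1-19 finds a committed offset (3) for t8_1-0. With enable.auto.commit = false that offset can only have been stored by an explicit commit from the previous consumer; a sketch of the call that produces such a committed position (the consumer value is assumed open as in the earlier sketch; the offset mirrors the log):

import java.util.Map as JMap
import org.apache.kafka.clients.consumer.{KafkaConsumer, OffsetAndMetadata}
import org.apache.kafka.common.TopicPartition

def commitProcessed(consumer: KafkaConsumer[String, String]): Unit =
  // Marks offsets 0..2 as processed, so the next consumer in group g8_1
  // resumes t8_1-0 from offset 3, as logged above.
  consumer.commitSync(JMap.of(new TopicPartition("t8_1", 0), new OffsetAndMetadata(3L)))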
15:22:19.421 [virtual-754] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_2-20
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_2
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

15:22:19.422 [virtual-754] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
15:22:19.423 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
15:22:19.423 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
15:22:19.423 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1768314139423
15:22:19.424 [virtual-761] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Subscribed to topic(s): t8_1
15:22:19.426 [virtual-761] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Cluster ID: BJqwkK6mSo6OvprLaTJJCw
15:22:19.427 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
15:22:19.427 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] (Re-)joining group
15:22:19.429 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_2 in Empty state. Created a new member id consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e and requesting the member to rejoin with this id.
15:22:19.429 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Request joining group due to: need to re-join with the given member-id: consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e
15:22:19.429 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] (Re-)joining group
15:22:19.430 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e joins group g8_2 in Empty state. Adding to the group now.
15:22:19.430 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e).
15:22:22.430 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_2 generation 1 with 1 members.
15:22:22.430 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e', protocol='range'}
15:22:22.431 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Finished assignment for group at generation 1: {consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e=Assignment(partitions=[t8_1-0])}
15:22:22.431 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e for group g8_2 for generation 1. The group has 1 members, 0 of which are static.
15:22:22.437 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e', protocol='range'}
15:22:22.438 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Notifying assignor about the new Assignment(partitions=[t8_1-0])
15:22:22.438 [virtual-761] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Adding newly assigned partitions: [t8_1-0]
15:22:22.439 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Found no committed offset for partition t8_1-0
15:22:22.441 [virtual-761] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Resetting offset for partition t8_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
15:22:22.443 [virtual-756] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:22:22.443 [virtual-760] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:22:22.443 [virtual-761] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
15:22:22.443 [virtual-757] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
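
All four traces above are the same shutdown mechanism seen from two sides: when the enclosing ox scope ends, the virtual threads running the poll loop are interrupted, and KafkaConsumer.poll rethrows the pending interrupt as Kafka's unchecked InterruptException, which KafkaFlow and KafkaConsumerWrapper log before closing. A sketch of that loop's shape (the helper name is hypothetical; poll and InterruptException are the real client API):

import java.time.Duration
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.common.errors.InterruptException

def pollUntilInterrupted[K, V](consumer: KafkaConsumer[K, V]): Unit =
  try
    while true do
      val _ = consumer.poll(Duration.ofMillis(100))
  catch case _: InterruptException => () // expected when the scope cancels the polling fork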
15:22:22.443 [virtual-763] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Revoke previously assigned partitions [t8_1-0]
15:22:22.443 [virtual-764] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Revoke previously assigned partitions [t8_1-0]
15:22:22.444 [virtual-764] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Member consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:22:22.444 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Member consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
15:22:22.444 [virtual-764] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Resetting generation and member id due to: consumer pro-actively leaving the group
15:22:22.444 [virtual-764] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Request joining group due to: consumer pro-actively leaving the group
15:22:22.444 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Resetting generation and member id due to: consumer pro-actively leaving the group
15:22:22.445 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Request joining group due to: consumer pro-actively leaving the group
15:22:22.445 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_2] Member consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:22:22.445 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g8_2-20-529a22b8-9366-45d1-bf81-204b0bb1e11e) members.).
15:22:22.445 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_2 with generation 2 is now empty.
15:22:22.446 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_1] Member consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
15:22:22.446 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g8_1-19-f1b3a8b3-cfd7-4cdb-8fb1-140b33b394e2) members.).
15:22:22.446 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_1 with generation 4 is now empty.
15:22:22.931 [virtual-764] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:22.931 [virtual-764] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:22.932 [virtual-764] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:22.932 [virtual-764] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:22.933 [virtual-764] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_1-19 unregistered
15:22:22.944 [virtual-763] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:22.944 [virtual-763] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:22.944 [virtual-763] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
15:22:22.944 [virtual-763] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:22.945 [virtual-763] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_2-20 unregistered
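
With both consumers unregistered, the suite tears down the broker, producing the controlled-shutdown sequence below. The embedded-kafka-spec group id and the localhost:6001 bootstrap address (embedded-kafka's default port) suggest the test runs against embedded-kafka; a sketch of that lifecycle, assumed from those hints rather than shown in this log:

import io.github.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}

@main def brokerLifecycleSketch(): Unit =
  implicit val config: EmbeddedKafkaConfig = EmbeddedKafkaConfig(kafkaPort = 6001)
  EmbeddedKafka.start() // the BrokerServer id=0 seen throughout this log
  // ... run producers/consumers against localhost:6001 ...
  EmbeddedKafka.stop()  // triggers "Transition from STARTED to SHUTTING_DOWN" below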
15:22:22.948 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from STARTED to SHUTTING_DOWN
15:22:22.948 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] shutting down
15:22:22.949 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Beginning controlled shutdown.
15:22:22.949 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] Unfenced broker 0 has requested and been granted a controlled shutdown.
15:22:22.954 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] enterControlledShutdown[0]: changing 11 partition(s)
15:22:22.954 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=0, inControlledShutdown=1, logDirs=[])
15:22:22.958 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker is in PENDING_CONTROLLED_SHUTDOWN state, still waiting for the active controller.
15:22:22.958 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 11 partition(s) to local followers.
15:22:22.962 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t5_2-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.963 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t6_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.963 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t4-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.963 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower __consumer_offsets-0 starts at leader epoch 1 from offset 1056 with partition epoch 1 and high watermark 1056. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.963 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t5_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.963 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t8_1-0 starts at leader epoch 1 from offset 5 with partition epoch 1 and high watermark 5. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.963 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t7_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.963 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t3_2-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.963 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t3_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.963 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.963 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t2-0 starts at leader epoch 1 from offset 1000 with partition epoch 1 and high watermark 1000. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
15:22:22.964 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions HashSet(t2-0, t6_1-0, t3_1-0, t3_2-0, t8_1-0, t5_2-0, t1-0, __consumer_offsets-0, t5_1-0, t7_1-0, t4-0)
15:22:22.964 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] Removed fetcher for partitions HashSet(t2-0, t6_1-0, t3_1-0, t3_2-0, t8_1-0, t5_2-0, t1-0, __consumer_offsets-0, t5_1-0, t7_1-0, t4-0)
15:22:22.966 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Stopped fetchers as part of controlled shutdown for 11 partitions
15:22:22.966 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Scheduling unloading of metadata for __consumer_offsets-0 with epoch OptionalInt[1]
15:22:22.966 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Started unloading metadata for __consumer_offsets-0 with epoch OptionalInt[1].
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g8_1] Unloading group metadata for generation 4.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g3_2] Unloading group metadata for generation 2.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g7_1] Unloading group metadata for generation 4.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g6_1] Unloading group metadata for generation 4.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g5_1] Unloading group metadata for generation 4.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g3_1] Unloading group metadata for generation 4.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g1] Unloading group metadata for generation 2.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g8_2] Unloading group metadata for generation 2.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g7_2] Unloading group metadata for generation 2.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g6_2] Unloading group metadata for generation 2.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=embedded-kafka-spec] Unloading group metadata for generation 4.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g5_2] Unloading group metadata for generation 2.
15:22:22.967 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Finished unloading metadata for __consumer_offsets-0 with epoch OptionalInt[1].
15:22:23.008 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] The request from broker 0 to shut down has been granted since the lowest active offset 9223372036854775807 is now greater than the broker's controlled shutdown offset 220.
15:22:23.010 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=1, inControlledShutdown=0, logDirs=[])
15:22:23.036 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The controller has asked us to exit controlled shutdown.
15:22:23.036 [broker-0-lifecycle-manager-event-handler] INFO o.a.k.q.KafkaEventQueue - [BrokerLifecycleManager id=0] beginShutdown: shutting down event queue.
15:22:23.037 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Transitioning from PENDING_CONTROLLED_SHUTDOWN to SHUTTING_DOWN.
15:22:23.037 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Shutting down
15:22:23.037 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Stopped
15:22:23.037 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Stopping socket server request processors
15:22:23.037 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Shutdown completed
15:22:23.040 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for heartbeat shutdown
15:22:23.040 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Stopped socket server request processors
15:22:23.041 [pool-67-thread-3] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Broker 0] shutting down
15:22:23.043 [pool-67-thread-3] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Broker 0] shut down completely
15:22:23.043 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutting down
15:22:23.043 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Stopped
15:22:23.043 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutdown completed
15:22:23.044 [pool-67-thread-3] INFO k.s.KafkaApis - [KafkaApi-0] Shutdown complete.
15:22:23.046 [pool-67-thread-3] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Shutting down.
15:22:23.047 [pool-67-thread-3] INFO k.c.t.TransactionStateManager - [Transaction State Manager 0]: Shutdown complete
15:22:23.047 [pool-67-thread-3] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Shutting down
15:22:23.047 [TxnMarkerSenderThread-0] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Stopped
15:22:23.047 [pool-67-thread-3] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Shutdown completed
15:22:23.049 [pool-67-thread-3] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Shutdown complete.
15:22:23.049 [pool-67-thread-3] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Shutting down.
15:22:23.049 [pool-67-thread-3] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Closing coordinator runtime.
15:22:23.049 [pool-67-thread-3] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Shutting down
15:22:23.049 [group-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Stopped
15:22:23.049 [pool-67-thread-3] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Shutdown completed
15:22:23.050 [pool-67-thread-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [GroupCoordinator id=0] Shutting down event processor.
15:22:23.050 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Shutting down. Draining the remaining events.
15:22:23.050 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Shutdown completed
15:22:23.050 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Shutting down. Draining the remaining events.
15:22:23.050 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Shutdown completed
15:22:23.050 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Shutting down. Draining the remaining events.
15:22:23.050 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Shutdown completed
15:22:23.050 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Shutting down. Draining the remaining events.
15:22:23.050 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Shutdown completed
15:22:23.050 [pool-67-thread-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [GroupCoordinator id=0] Event processor closed.
15:22:23.051 [pool-67-thread-3] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Coordinator runtime closed.
15:22:23.052 [pool-67-thread-3] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Shutdown complete.
15:22:23.052 [pool-67-thread-3] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Shutting down.
15:22:23.052 [pool-67-thread-3] INFO o.a.k.c.c.r.CoordinatorRuntime - [ShareCoordinator id=0] Closing coordinator runtime.
15:22:23.052 [pool-67-thread-3] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Shutting down
15:22:23.052 [share-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Stopped
15:22:23.052 [pool-67-thread-3] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Shutdown completed
15:22:23.053 [pool-67-thread-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [ShareCoordinator id=0] Shutting down event processor.
15:22:23.053 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Shutting down. Draining the remaining events.
15:22:23.053 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Shutdown completed
15:22:23.053 [pool-67-thread-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [ShareCoordinator id=0] Event processor closed.
15:22:23.053 [pool-67-thread-3] INFO o.a.k.c.c.r.CoordinatorRuntime - [ShareCoordinator id=0] Coordinator runtime closed.
15:22:23.054 [pool-67-thread-3] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Shutdown complete.
15:22:23.054 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [AssignmentsManager id=0]KafkaEventQueue#close: shutting down event queue.
15:22:23.054 [broker-0-directory-assignments-manager-event-handler] INFO o.a.k.s.AssignmentsManager - [AssignmentsManager id=0] shutting down.
15:22:23.054 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Shutting down
15:22:23.055 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Stopped
15:22:23.055 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Shutdown completed
15:22:23.055 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for directory-assignments shutdown
15:22:23.055 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [AssignmentsManager id=0]closed event queue.
15:22:23.056 [pool-67-thread-3] INFO k.s.ReplicaManager - [ReplicaManager broker=0] Shutting down
15:22:23.056 [pool-67-thread-3] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Shutting down
15:22:23.056 [LogDirFailureHandler] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Stopped
15:22:23.056 [pool-67-thread-3] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Shutdown completed
15:22:23.057 [pool-67-thread-3] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] shutting down
15:22:23.058 [pool-67-thread-3] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] shutdown completed
15:22:23.058 [pool-67-thread-3] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] shutting down
15:22:23.058 [pool-67-thread-3] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] shutdown completed
15:22:23.058 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Shutting down
15:22:23.058 [ExpirationReaper-0-Fetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Stopped
15:22:23.059 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Shutdown completed
1101615:22:23.059 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Shutting down
1101715:22:23.060 [ExpirationReaper-0-RemoteFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Stopped
1101815:22:23.060 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Shutdown completed
1101915:22:23.060 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Shutting down
1102015:22:23.060 [ExpirationReaper-0-RemoteListOffsets] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Stopped
1102115:22:23.061 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Shutdown completed
1102215:22:23.061 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Shutting down
1102315:22:23.061 [ExpirationReaper-0-Produce] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Stopped
1102415:22:23.061 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Shutdown completed
1102515:22:23.062 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Shutting down
1102615:22:23.062 [ExpirationReaper-0-DeleteRecords] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Stopped
1102715:22:23.062 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Shutdown completed
1102815:22:23.062 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Shutting down
1102915:22:23.063 [ExpirationReaper-0-ShareFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Stopped
1103015:22:23.063 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Shutdown completed
1103115:22:23.067 [pool-67-thread-3] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Shutting down
1103215:22:23.067 [AddPartitionsToTxnSenderThread-0] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Stopped
1103315:22:23.067 [pool-67-thread-3] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Shutdown completed
1103415:22:23.067 [pool-67-thread-3] INFO k.s.ReplicaManager - [ReplicaManager broker=0] Shut down completely
1103515:22:23.067 [pool-67-thread-3] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Shutting down
1103615:22:23.068 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Stopped
1103715:22:23.068 [pool-67-thread-3] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Shutdown completed
1103815:22:23.068 [pool-67-thread-3] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for alter-partition shutdown
1103915:22:23.069 [pool-67-thread-3] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Shutting down
1104015:22:23.069 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Stopped
1104115:22:23.069 [pool-67-thread-3] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Shutdown completed
1104215:22:23.069 [pool-67-thread-3] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for forwarding shutdown
1104315:22:23.070 [pool-67-thread-3] INFO k.l.LogManager - Shutting down.
1104415:22:23.070 [pool-67-thread-3] INFO o.a.k.s.i.l.LogCleaner - Shutting down the log cleaner.
1104515:22:23.071 [pool-67-thread-3] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Shutting down
1104615:22:23.071 [kafka-log-cleaner-thread-0] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Stopped
1104715:22:23.071 [pool-67-thread-3] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Shutdown completed
1104815:22:23.075 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t5_2-0] Wrote producer snapshot at offset 3 with 1 producer ids in 2 ms.
1104915:22:23.076 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t3_2-0] Wrote producer snapshot at offset 3 with 1 producer ids in 2 ms.
1105015:22:23.078 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=__consumer_offsets-0] Wrote producer snapshot at offset 1056 with 0 producer ids in 1 ms.
1105115:22:23.078 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t2-0] Wrote producer snapshot at offset 1000 with 1 producer ids in 1 ms.
1105215:22:23.080 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t3_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
1105315:22:23.080 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t5_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 0 ms.
1105415:22:23.081 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t7_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
1105515:22:23.082 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t8_1-0] Wrote producer snapshot at offset 5 with 5 producer ids in 1 ms.
1105615:22:23.082 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t4-0] Wrote producer snapshot at offset 3 with 1 producer ids in 1 ms.
1105715:22:23.083 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
1105815:22:23.084 [log-closing-/tmp/kafka-logs5982689497894266552] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t6_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 0 ms.
1105915:22:23.115 [pool-67-thread-3] INFO k.l.LogManager - Shutdown complete.
1106015:22:23.115 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Shutting down
1106115:22:23.116 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Shutdown completed
1106215:22:23.116 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Shutting down
1106315:22:23.116 [broker-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Stopped
1106415:22:23.117 [broker-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Stopped
1106515:22:23.116 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Shutdown completed
1106615:22:23.117 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Shutting down
1106715:22:23.117 [broker-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Stopped
1106815:22:23.117 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Shutdown completed
1106915:22:23.117 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Shutting down
1107015:22:23.118 [broker-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Stopped
1107115:22:23.118 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Shutdown completed
1107215:22:23.118 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Shutting down socket server
1107315:22:23.131 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Shutdown completed
1107415:22:23.132 [pool-67-thread-3] INFO o.a.k.s.l.m.BrokerTopicStats - Broker and topic stats closed
1107515:22:23.132 [pool-67-thread-3] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Shutting down
1107615:22:23.132 [share-group-lock-timeout-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Stopped
1107715:22:23.132 [pool-67-thread-3] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Shutdown completed
1107815:22:23.134 [pool-67-thread-3] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Shutting down
1107915:22:23.134 [PersisterStateManager] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Stopped
1108015:22:23.134 [pool-67-thread-3] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Shutdown completed
1108115:22:23.135 [pool-67-thread-3] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Shutting down
1108215:22:23.135 [persister-state-manager-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Stopped
1108315:22:23.135 [pool-67-thread-3] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Shutdown completed
1108415:22:23.136 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [BrokerLifecycleManager id=0] closed event queue.
1108515:22:23.136 [pool-67-thread-3] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Shutting down
1108615:22:23.136 [client-metrics-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Stopped
1108715:22:23.136 [pool-67-thread-3] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Shutdown completed
1108815:22:23.137 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] shut down completed
1108915:22:23.137 [pool-67-thread-3] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from SHUTTING_DOWN to SHUTDOWN
1109015:22:23.137 [pool-67-thread-3] INFO k.s.ControllerServer - [ControllerServer id=0] shutting down
1109115:22:23.138 [pool-67-thread-3] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Shutting down
1109215:22:23.153 [pool-67-thread-3] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Shutdown completed
1109315:22:23.153 [raft-expiration-reaper] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Stopped
1109415:22:23.153 [pool-67-thread-3] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Shutting down
1109515:22:23.153 [pool-67-thread-3] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Beginning graceful shutdown
1109615:22:23.154 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Graceful shutdown completed
1109715:22:23.154 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [RaftManager id=0] Completed graceful shutdown of RaftClient
1109815:22:23.154 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Stopped
1109915:22:23.154 [pool-67-thread-3] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Shutdown completed
1110015:22:23.156 [pool-67-thread-3] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Shutting down
1110115:22:23.156 [kafka-0-raft-outbound-request-thread] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Stopped
1110215:22:23.156 [pool-67-thread-3] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Shutdown completed
1110315:22:23.158 [pool-67-thread-3] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=__cluster_metadata-0] Wrote producer snapshot at offset 222 with 0 producer ids in 2 ms.
1110415:22:23.160 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=otvYEWT7TdqjIMfAFNpAYw] beginShutdown: shutting down event queue.
1110515:22:23.160 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=otvYEWT7TdqjIMfAFNpAYw] shutting down.
1110615:22:23.160 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutting down
1110715:22:23.161 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Stopped
1110815:22:23.161 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutdown completed
1110915:22:23.162 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for registration shutdown
1111015:22:23.162 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=otvYEWT7TdqjIMfAFNpAYw] closed event queue.
1111115:22:23.163 [pool-67-thread-3] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutdown completed
1111215:22:23.163 [pool-67-thread-3] WARN o.a.k.c.NetworkClient - [NodeToControllerChannelManager id=0 name=registration] Attempting to close NetworkClient that has already been closed.
1111315:22:23.163 [pool-67-thread-3] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for registration shutdown
1111415:22:23.163 [kafka-0-metadata-loader-event-handler] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=otvYEWT7TdqjIMfAFNpAYw] closed event queue.
1111515:22:23.164 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Stopping socket server request processors
1111615:22:23.166 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Stopped socket server request processors
1111715:22:23.166 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [QuorumController id=0] QuorumController#beginShutdown: shutting down event queue.
1111815:22:23.167 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Shutting down socket server
1111915:22:23.168 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] writeNoOpRecord: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112015:22:23.168 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] maybeFenceStaleBroker: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112115:22:23.168 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] generatePeriodicPerformanceMessage: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112215:22:23.168 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] electPreferred: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112315:22:23.168 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] electUnclean: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112415:22:23.168 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] expireDelegationTokens: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
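The six QuorumController lines above illustrate a common graceful-shutdown pattern: once the event queue has begun shutting down, newly submitted events are rejected rather than processed, and the rejection is surfaced to callers as a timeout. A minimal sketch of that pattern, using a plain java.util.concurrent executor rather than Kafka's actual KafkaEventQueue, with illustrative event names:

    import java.util.concurrent.{Executors, RejectedExecutionException}

    object EventQueueShutdownSketch:
      @main def run(): Unit =
        val queue = Executors.newSingleThreadExecutor()
        queue.shutdown() // beginShutdown: the queue stops accepting new events
        val event: Runnable = () => println("writeNoOpRecord") // hypothetical event
        try
          queue.submit(event) // returns a Future, discarded in this sketch
        catch
          case e: RejectedExecutionException =>
            // As in the log lines above, the rejection would be reported to
            // waiting callers as a timeout ("treated as TimeoutException").
            println(s"event unable to start processing: $e")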
15:22:23.172 [pool-67-thread-3] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Shutdown completed
15:22:23.172 [pool-67-thread-3] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Controller 0] shutting down
15:22:23.173 [pool-67-thread-3] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Controller 0] shut down completely
15:22:23.174 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutting down
15:22:23.174 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Stopped
15:22:23.175 [pool-67-thread-3] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutdown completed
15:22:23.175 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Shutting down
15:22:23.176 [controller-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Stopped
15:22:23.176 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Shutdown completed
15:22:23.176 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Shutting down
15:22:23.176 [controller-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Stopped
15:22:23.176 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Shutdown completed
15:22:23.176 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Shutting down
15:22:23.177 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Shutdown completed
15:22:23.177 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Shutting down
15:22:23.177 [controller-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Stopped
15:22:23.177 [controller-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Stopped
15:22:23.177 [pool-67-thread-3] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Shutdown completed
15:22:23.177 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [QuorumController id=0] closed event queue.
15:22:23.179 [pool-67-thread-3] INFO k.s.SharedServer - [SharedServer id=0] Stopping SharedServer
15:22:23.179 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [MetadataLoader id=0] beginShutdown: shutting down event queue.
15:22:23.179 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] beginShutdown: shutting down event queue.
15:22:23.180 [kafka-0-metadata-loader-event-handler] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] closed event queue.
15:22:23.181 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [MetadataLoader id=0] closed event queue.
15:22:23.182 [pool-67-thread-3] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] closed event queue.
15:22:23.182 [pool-67-thread-3] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
15:22:23.182 [pool-67-thread-3] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
15:22:23.182 [pool-67-thread-3] INFO o.a.k.c.m.Metrics - Metrics reporters closed
15:22:23.183 [pool-67-thread-3] INFO o.a.k.c.u.AppInfoParser - App info kafka.server for 0 unregistered
[info] KafkaTest:
[info] source
[info] - should receive messages from a topic
[info] stage
[info] - should publish messages to a topic
[info] stage
[info] - should commit offsets of processed messages
[info] drain
[info] - should publish messages to a topic
[info] drain
[info] - should commit offsets of processed messages
[info] drain
[info] - should commit offsets using runCommit
[info] stage
[info] - should commit offsets using mapCommit
[info] stage
[info] - should commit offsets when consuming a finite stream using take

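The KafkaTest cases above exercise ox's flow-based Kafka connector: consuming from a topic, publishing, and committing offsets only once the corresponding messages have been processed. As a minimal sketch of the consume-process-commit cycle that the "commit offsets" tests verify, here is the plain kafka-clients consumer API (not ox's Flow API); the bootstrap server, group id and topic name are illustrative assumptions:

    import java.time.Duration
    import java.util.Properties
    import scala.jdk.CollectionConverters.*
    import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
    import org.apache.kafka.common.serialization.StringDeserializer

    @main def consumeAndCommit(): Unit =
      val props = new Properties()
      props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // assumption
      props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group")           // assumption
      props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
      props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
      props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") // commit manually, after processing
      val consumer = new KafkaConsumer[String, String](props)
      try
        consumer.subscribe(java.util.List.of("t1")) // topic name is an assumption
        val records = consumer.poll(Duration.ofSeconds(1))
        records.asScala.foreach(r => println(s"${r.topic()}-${r.partition()}@${r.offset()}: ${r.value()}"))
        consumer.commitSync() // offsets committed only once the records were processed
      finally consumer.close()

Disabling auto-commit and calling commitSync() after processing is what gives at-least-once semantics; ox's source/stage/drain operators tested above manage the same bookkeeping within a Flow.
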
************************
Build summary:
[{
  "module": "flow-reactive-streams",
  "compile": {"status": "ok", "tookMs": 16970, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 301, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 200, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "mdc-logback",
  "compile": {"status": "ok", "tookMs": 686, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 1063, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 553, "passed": 1, "failed": 0, "ignored": 0, "skipped": 0, "total": 1, "byFramework": [{"framework": "unknown", "stats": {"passed": 1, "failed": 0, "ignored": 0, "skipped": 0, "total": 1}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "core",
  "compile": {"status": "ok", "tookMs": 65, "warnings": 13, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 23907, "warnings": 20, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 150064, "passed": 795, "failed": 0, "ignored": 7, "skipped": 0, "total": 802, "byFramework": [{"framework": "unknown", "stats": {"passed": 795, "failed": 0, "ignored": 7, "skipped": 0, "total": 802}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "cron",
  "compile": {"status": "ok", "tookMs": 415, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 740, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 4538, "passed": 3, "failed": 0, "ignored": 0, "skipped": 0, "total": 3, "byFramework": [{"framework": "unknown", "stats": {"passed": 3, "failed": 0, "ignored": 0, "skipped": 0, "total": 3}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "otel-context",
  "compile": {"status": "ok", "tookMs": 286, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 315, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 191, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "kafka",
  "compile": {"status": "ok", "tookMs": 832, "warnings": 1, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 1307, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 89722, "passed": 8, "failed": 0, "ignored": 0, "skipped": 0, "total": 8, "byFramework": [{"framework": "unknown", "stats": {"passed": 8, "failed": 0, "ignored": 0, "skipped": 0, "total": 8}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
}]
************************
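Each entry in the build summary above has the same shape. A hedged Scala model of that shape can make post-processing such summaries easier; only the field names are taken from the JSON itself, while the type names are assumptions, and no JSON library is wired in here:

    // Hypothetical model of one build-summary entry.
    final case class StepResult(status: String, tookMs: Long, warnings: Int, errors: Int, sourceVersion: String)
    final case class TestStats(passed: Int, failed: Int, ignored: Int, skipped: Int, total: Int)
    final case class ModuleSummary(
      module: String,                  // e.g. "core", "kafka"
      compile: StepResult,             // the "compile" object
      testCompile: StepResult,         // serialized as "test-compile"
      test: TestStats,                 // per-run totals; "byFramework" omitted in this sketch
      crossScalaVersions: List[String] // from the nested "metadata" object
    )

    // e.g. summing across the six modules above: 0 + 1 + 795 + 3 + 0 + 8 == 807 passed tests
    def totalPassed(modules: List[ModuleSummary]): Int = modules.map(_.test.passed).sum
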
[success] Total time: 298 s (0:04:58.0), completed Jan 13, 2026, 3:22:23 PM
Checking patch project/plugins.sbt...
Checking patch build.sbt...
Applied patch project/plugins.sbt cleanly.
Applied patch build.sbt cleanly.