Build Logs
softwaremill/ox • 3.8.0-RC6:2026-01-08
Errors: 231
Warnings: 473
Total Lines: 11241
1##################################
2Cloning https://github.com/softwaremill/ox.git into /build/repo using revision v1.0.2
3##################################
4Note: switching to '9cb2ebc1df12e84198f24d8429f0ed135892788e'.
5
6You are in 'detached HEAD' state. You can look around, make experimental
7changes and commit them, and you can discard any commits you make in this
8state without impacting any branches by switching back to a branch.
9
10If you want to create a new branch to retain commits you create, you may
11do so (now or later) by using -c with the switch command. Example:
12
13 git switch -c <new-branch-name>
14
15Or undo this operation with:
16
17 git switch -
18
19Turn off this advice by setting config variable advice.detachedHead to false
20
21Using target Scala version for migration: 3.7.4
22Migrating project for -source:3.7 using Scala 3.7.4
23----
24Preparing build for 3.7.4
25Would try to apply common scalacOptions (best-effort, sbt/mill only):
26Append: -rewrite,REQUIRE:-source:3.7-migration
27Remove: -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
28----
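
Aside: the Append/Remove lists above are best-effort edits applied to each module's scalacOptions (the REQUIRE:/MATCH: prefixes appear to be the plugin's own markers for "must end up present" and regex matching). In plain build.sbt terms the mapping amounts to roughly this sketch:

    // Illustrative only; the real mapping is done by the community-build plugin.
    scalacOptions ++= Seq("-rewrite", "-source:3.7-migration")     // Append
    scalacOptions --= Seq("-indent", "-no-indent", "-new-syntax",  // Remove
      "-deprecation", "-feature", "-Xfatal-warnings", "-Werror")
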
29Starting build for 3.7.4
30Execute tests: false
31sbt project found:
32No prepare script found for project softwaremill/ox
33##################################
34Scala version: 3.7.4
35Targets: com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context
36Project projectConfig: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}
37##################################
38Using extra scalacOptions: -rewrite,REQUIRE:-source:3.7-migration
39Filtering out scalacOptions: -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
40[sbt_options] declare -a sbt_options=()
41[process_args] java_version = '21'
42[copyRt] java9_rt = '/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21/rt.jar'
43# Executing command line:
44java
45-Dfile.encoding=UTF-8
46-Dcommunitybuild.scala=3.7.4
47-Dcommunitybuild.project.dependencies.add=
48-Xmx7G
49-Xms4G
50-Xss8M
51-Dsbt.script=/root/.sdkman/candidates/sbt/current/bin/sbt
52-Dscala.ext.dirs=/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21
53-jar
54/root/.sdkman/candidates/sbt/1.11.5/bin/sbt-launch.jar
55"setCrossScalaVersions 3.7.4"
56"++3.7.4 -v"
57"mapScalacOptions "-rewrite,REQUIRE:-source:3.7-migration,-Wconf:msg=can be rewritten automatically under:s" "-indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e""
58"set every credentials := Nil"
59"excludeLibraryDependency com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}"
60"removeScalacOptionsStartingWith -P:wartremover"
61
62moduleMappings
63"runBuild 3.7.4 """{"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}""" com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context"
64
65[info] [launcher] getting org.scala-sbt sbt 1.11.7 (this may take some time)...
66[info] welcome to sbt 1.11.7 (Eclipse Adoptium Java 21)
67[info] loading settings for project repo-build from akka.sbt, plugins.sbt...
68[info] loading project definition from /build/repo/project
69[info] compiling 2 Scala sources to /build/repo/project/target/scala-2.12/sbt-1.0/classes ...
70[info] Non-compiled module 'compiler-bridge_2.12' for Scala 2.12.20. Compiling...
71[info] Compilation completed in 8.46s.
72[info] done compiling
73[info] loading settings for project rootProject from build.sbt...
74[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
75[info] set current project to ox (in build file:/build/repo/)
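
Aside: the MiMa lines come from the project's binary-compatibility checks. A typical sbt-mima-plugin setup looks roughly like this (a sketch; ox's actual build may configure it differently):

    // 1.0.1 is the previous release named in the log line above.
    mimaPreviousArtifacts := Set(organization.value %% name.value % "1.0.1")
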
76Execute setCrossScalaVersions: 3.7.4
77OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in core/crossScalaVersions
78OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in core/crossScalaVersions
79OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in flowReactiveStreams/crossScalaVersions
80OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in flowReactiveStreams/crossScalaVersions
81OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in cron/crossScalaVersions
82OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in cron/crossScalaVersions
83OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in documentation/crossScalaVersions
84OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in documentation/crossScalaVersions
85OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in rootProject/crossScalaVersions
86OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in rootProject/crossScalaVersions
87OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in otelContext/crossScalaVersions
88OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in otelContext/crossScalaVersions
89OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in kafka/crossScalaVersions
90OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in kafka/crossScalaVersions
91OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in mdcLogback/crossScalaVersions
92OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in mdcLogback/crossScalaVersions
93[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
94[info] set current project to ox (in build file:/build/repo/)
95[info] Setting Scala version to 3.7.4 on 8 projects.
96[info] Switching Scala version on:
97[info] flowReactiveStreams (3.7.4)
98[info] documentation (3.7.4)
99[info] cron (3.7.4)
100[info] * rootProject (3.7.4)
101[info] mdcLogback (3.7.4)
102[info] kafka (3.7.4)
103[info] core (3.7.4)
104[info] otelContext (3.7.4)
105[info] Excluding projects:
106[info] Reapplying settings...
107[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
108[info] set current project to ox (in build file:/build/repo/)
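
Aside: setCrossScalaVersions pins every module to the build's single Scala version before ++3.7.4 switches to it. In a plain build the setting would read roughly as follows (values taken from this log; a sketch only):

    crossScalaVersions := Seq("3.3.7", "2.12.20")  // the project's own values
    // ...which the community build narrows to:
    crossScalaVersions := Seq("3.7.4")
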
109Execute mapScalacOptions: -rewrite,REQUIRE:-source:3.7-migration,-Wconf:msg=can be rewritten automatically under:s -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
110[info] Reapplying settings...
111[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
112[info] set current project to ox (in build file:/build/repo/)
113[info] Defining Global / credentials, core / credentials and 6 others.
114[info] The new values will be used by Compile / scalafmtOnly, Global / pgpSelectPassphrase and 63 others.
115[info] Run `last` for details.
116[info] Reapplying settings...
117[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
118[info] set current project to ox (in build file:/build/repo/)
119Execute excludeLibraryDependency: com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}
120[info] Reapplying settings...
121OpenCB::Failed to reapply settings in excludeLibraryDependency: Reference to undefined setting:
122
123 Global / allExcludeDependencies from Global / allExcludeDependencies (CommunityBuildPlugin.scala:331)
124 Did you mean flowReactiveStreams / allExcludeDependencies ?
125 , retry without global scopes
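
Aside: the failure above is benign; the plugin first tries a Global-scoped exclusion and, as logged, retries per project. In plain sbt, excluding such compiler-plugin dependencies looks roughly like this (a sketch; organizations taken from the command above):

    excludeDependencies ++= Seq(
      ExclusionRule("com.github.ghik"),  // zerowaste
      ExclusionRule("org.wartremover"))  // wartremover
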
126[info] Reapplying settings...
127[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
128[info] set current project to ox (in build file:/build/repo/)
129Execute removeScalacOptionsStartingWith: -P:wartremover
130[info] Reapplying settings...
131[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
132[info] set current project to ox (in build file:/build/repo/)
133[success] Total time: 0 s, completed Jan 8, 2026, 1:27:38 AM
134Build config: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}
135Parsed config: Success(ProjectBuildConfig(ProjectsConfig(List(),Map()),CompileOnly,List()))
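
Aside: the Parsed config line hints at the plugin's config model. A hypothetical reconstruction (names mirror the output; field types are guesses from the JSON above):

    case class ProjectsConfig(exclude: List[String], overrides: Map[String, String])
    enum TestingMode { case CompileOnly, Full }
    case class ProjectBuildConfig(
      projects: ProjectsConfig,
      tests: TestingMode,           // CompileOnly in this stage, Full later
      sourcePatches: List[String])
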
136Starting build...
137Projects: Set(flowReactiveStreams, cron, mdcLogback, kafka, core, otelContext)
138Starting build for ProjectRef(file:/build/repo/,flowReactiveStreams) (flow-reactive-streams)... [0/6]
139OpenCB::Exclude Scala3 specific scalacOption `-rewrite` in Scala 2.12.20 module Global
140OpenCB::Exclude Scala3 specific scalacOption `REQUIRE:-source:3.7-migration` in Scala 2.12.20 module Global
141OpenCB::Filter out '-deprecation', matches setting pattern '^-?-deprecation'
142OpenCB::Filter out '-feature', matches setting pattern '^-?-feature'
143Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
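
Aside: note the -Wconf entry in the options above: it silences (action "s") any diagnostic whose message matches the given text, here the "can be rewritten automatically under ..." hints, so they are not counted as warnings. In sbt it would be added as:

    scalacOptions += "-Wconf:msg=can be rewritten automatically under:s"
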
144[info] compiling 57 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
145[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:187:27
146[warn] 187 | def fromArray[A: ClassTag](array: Array[A]): Chunk[A] =
147[warn] | ^
148[warn] | unused implicit parameter
149[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:190:28
150[warn] 190 | def fromIArray[A: ClassTag](array: IArray[A]): Chunk[A] =
151[warn] | ^
152[warn] | unused implicit parameter
153[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:35:45
154[warn] 35 | def addSuppressedException[T](error: F[T], e: Throwable): F[T] = error
155[warn] | ^
156[warn] | unused explicit parameter
157[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:40:41
158[warn] 40 | def addSuppressedError[T](error: F[T], e: E): F[T] = error
159[warn] | ^
160[warn] | unused explicit parameter
161[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/channels/SourceOps.scala:5:12
162[warn] 5 |import java.util
163[warn] | ^^^^
164[warn] | unused import
165[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
166[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
167[warn] | ^^^^
168[warn] | unused explicit parameter
169[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:5:10
170[warn] 5 |import ox.Ox
171[warn] | ^^
172[warn] | unused import
173[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:616:8
174[warn] 616 | tap(t => sleep(emitEveryMillis))
175[warn] | ^
176[warn] | unused explicit parameter
177[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:947:53
178[warn] 947 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
179[warn] | ^^^^
180[warn] | unused explicit parameter
181[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowReactiveOps.scala:11:10
182[warn] 11 |import ox.fork
183[warn] | ^^^^
184[warn] | unused import
185[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowTextOps.scala:149:60
186[warn] 149 | def processByteOrderMark(bytes: T, buffer: Chunk[Byte], output: FlowEmit[String]): (Chunk[Byte], State) =
187[warn] | ^^^^^^
188[warn] | unused explicit parameter
189[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
190[warn] 123 | private var successCalls = 0
191[warn] | ^^^^^^^^^^^^
192[warn] | private variable was mutated but not read
193[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
194[warn] 167 | private var successCalls = 0
195[warn] | ^^^^^^^^^^^^
196[warn] | private variable was mutated but not read
197[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:414:15
198[warn] 414 | case Nested(t) =>
199[warn] | ^
200[warn] |the type test for Nested cannot be checked at runtime because it's a local class
201[warn] |
202[warn] | longer explanation available when compiling with `-explain`
203[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
204[warn] 150 | case FromParent(t) =>
205[warn] | ^
206[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
207[warn] |
208[warn] | longer explanation available when compiling with `-explain`
209[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
210[warn] 154 | case ChildDone(v) =>
211[warn] | ^
212[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
213[warn] |
214[warn] | longer explanation available when compiling with `-explain`
215[info] [patched file /build/repo/core/src/main/scala/ox/channels/SourceOps.scala]
216[info] [patched file /build/repo/core/src/main/scala/ox/flow/FlowOps.scala]
217[info] [patched file /build/repo/core/src/main/scala/ox/oxThreadFactory.scala]
218[info] [patched file /build/repo/core/src/main/scala/ox/local.scala]
219[info] [patched file /build/repo/core/src/main/scala/ox/flow/FlowReactiveOps.scala]
220[warn] 16 warnings found
221[info] done compiling
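
Aside: the E198 warnings above all come from the -Wunused:* flags in effect. A minimal sketch (not from the ox sources) reproducing the two common cases in this log, plus the usual fix:

    import scala.annotation.unused
    import scala.reflect.ClassTag

    // Warns under -Wunused:implicits: the ClassTag evidence is never used.
    def fromArray[A: ClassTag](xs: Array[A]): List[A] = xs.toList
    // Warns under -Wunused:explicits: parameter i is never referenced.
    def transformation(i: Int): Int = 42
    // Common fix: annotate intentionally-unused parameters.
    def transformation2(@unused i: Int): Int = 42
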
222[info] compiling 1 Scala source to /build/repo/flow-reactive-streams/target/scala-3.7.4/classes ...
223[info] done compiling
224[info] compiling 5 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
225[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:615:8
226[warn] 615 | tap(t => sleep(emitEveryMillis))
227[warn] | ^
228[warn] | unused explicit parameter
229[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:946:53
230[warn] 946 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
231[warn] | ^^^^
232[warn] | unused explicit parameter
233[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:413:15
234[warn] 413 | case Nested(t) =>
235[warn] | ^
236[warn] |the type test for Nested cannot be checked at runtime because it's a local class
237[warn] |
238[warn] | longer explanation available when compiling with `-explain`
239[warn] three warnings found
240[info] done compiling
241[info] compiling 1 Scala source to /build/repo/core/target/scala-3.7.4/classes ...
242[warn] three warnings found
243[info] done compiling
244[info] compiling 25 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
245[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
246[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
247[warn] | ^^^^
248[warn] | unused explicit parameter
249[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
250[warn] 123 | private var successCalls = 0
251[warn] | ^^^^^^^^^^^^
252[warn] | private variable was mutated but not read
253[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
254[warn] 167 | private var successCalls = 0
255[warn] | ^^^^^^^^^^^^
256[warn] | private variable was mutated but not read
257[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
258[warn] 150 | case FromParent(t) =>
259[warn] | ^
260[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
261[warn] |
262[warn] | longer explanation available when compiling with `-explain`
263[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
264[warn] 154 | case ChildDone(v) =>
265[warn] | ^
266[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
267[warn] |
268[warn] | longer explanation available when compiling with `-explain`
269[warn] 8 warnings found
270[info] done compiling
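
Aside: the E092 warnings concern type tests against classes defined locally inside a method, which cannot be fully checked at runtime. A minimal sketch (assumed, not the ox code) that typically triggers the same warning:

    def demo(x: Any): Unit =
      case class Nested(t: Int)
      x match
        case Nested(t) => println(t) // E092: local class, unchecked test
        case _         => ()
    // Remedies: hoist Nested out of the method, or accept the single test
    // with an ascription like `case n: (Nested @unchecked) => ...`.
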
271Starting build for ProjectRef(file:/build/repo/,mdcLogback) (mdc-logback)... [1/6]
272Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
273[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.7.4/classes ...
274[info] done compiling
275[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.7.4/test-classes ...
276[info] done compiling
277Starting build for ProjectRef(file:/build/repo/,core) (core)... [2/6]
278Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
279[info] compiling 112 Scala sources to /build/repo/core/target/scala-3.7.4/test-classes ...
280[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
281[warn] 43 | def transformation(i: Int) =
282[warn] | ^
283[warn] | unused explicit parameter
284[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ForeachParTest.scala:38:23
285[warn] 38 | def transformation(i: Int) =
286[warn] | ^
287[warn] | unused explicit parameter
288[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
289[warn] 43 | def transformation(i: Int) =
290[warn] | ^
291[warn] | unused explicit parameter
292[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:9:27
293[warn] 9 |import scala.util.boundary.*
294[warn] | ^
295[warn] | unused import
296[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:13:29
297[warn] 13 |import java.util.concurrent.{Semaphore, TimeUnit}
298[warn] | ^^^^^^^^^
299[warn] | unused import
300[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:13:40
301[warn] 13 |import java.util.concurrent.{Semaphore, TimeUnit}
302[warn] | ^^^^^^^^
303[warn] | unused import
304[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ParTest.scala:80:21
305[warn] 80 | (1 to 5).map(i =>
306[warn] | ^
307[warn] | unused explicit parameter
308[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:126:41
309[warn] 126 | use(new TestResource, _.release()) { r =>
310[warn] | ^
311[warn] | unused explicit parameter
312[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:140:37
313[warn] 140 | useCloseable(new TestResource) { r =>
314[warn] | ^
315[warn] | unused explicit parameter
316[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:157:43
317[warn] 157 | use(new TestResource, _.release()) { r =>
318[warn] | ^
319[warn] | unused explicit parameter
320[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala:3:43
321[warn] 3 |import org.scalatest.concurrent.Eventually.*
322[warn] | ^
323[warn] | unused import
324[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:3:43
325[warn] 3 |import org.scalatest.concurrent.Eventually.*
326[warn] | ^
327[warn] | unused import
328[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:387:44
329[warn] 387 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
330[warn] | ^
331[warn] | unused implicit parameter
332[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsCollectTest.scala:5:10
333[warn] 5 |import ox.*
334[warn] | ^
335[warn] | unused import
336[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsConcatPrependTest.scala:5:10
337[warn] 5 |import ox.*
338[warn] | ^
339[warn] | unused import
340[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceByTest.scala:5:10
341[warn] 5 |import ox.*
342[warn] | ^
343[warn] | unused import
344[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceTest.scala:5:10
345[warn] 5 |import ox.*
346[warn] | ^
347[warn] | unused import
348[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDropTest.scala:5:10
349[warn] 5 |import ox.*
350[warn] | ^
351[warn] | unused import
352[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsEmptyTest.scala:5:10
353[warn] 5 |import ox.*
354[warn] | ^
355[warn] | unused import
356[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFilterTest.scala:5:10
357[warn] 5 |import ox.*
358[warn] | ^
359[warn] | unused import
360[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFlatMapTest.scala:5:10
361[warn] 5 |import ox.*
362[warn] | ^
363[warn] | unused import
364[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFlattenTest.scala:6:10
365[warn] 6 |import ox.*
366[warn] | ^
367[warn] | unused import
368[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFoldTest.scala:5:10
369[warn] 5 |import ox.*
370[warn] | ^
371[warn] | unused import
372[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsForeachTest.scala:5:10
373[warn] 5 |import ox.*
374[warn] | ^
375[warn] | unused import
376[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFutureTest.scala:5:10
377[warn] 5 |import ox.*
378[warn] | ^
379[warn] | unused import
380[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:16:27
381[warn] 16 | .groupBy(10, _ % 10)(v => f => f)
382[warn] | ^
383[warn] | unused explicit parameter
384[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:22:44
385[warn] 22 | Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
386[warn] | ^
387[warn] | unused explicit parameter
388[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:28:68
389[warn] 28 | for i <- 1 to 100000 do Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
390[warn] | ^
391[warn] | unused explicit parameter
392[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:109:26
393[warn] 109 | .groupBy(1, _ => 0)(v => _.tap(_ => sleep(10.millis)))
394[warn] | ^
395[warn] | unused explicit parameter
396[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:119:31
397[warn] 119 | .groupBy(10, _ % 10)(v => f => f.tap(i => if i == 13 then throw new RuntimeException("boom!")))
398[warn] | ^
399[warn] | unused explicit parameter
400[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:131:30
401[warn] 131 | .groupBy(1, _ => 0)(v => f => f.tap(_ => sleep(100.millis).tap(_ => throw new RuntimeException("boom!"))))
402[warn] | ^
403[warn] | unused explicit parameter
404[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:142:31
405[warn] 142 | .groupBy(10, _ % 10)(v => f => f)
406[warn] | ^
407[warn] | unused explicit parameter
408[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:153:29
409[warn] 153 | .groupBy(10, _ % 10)(v => f => f.take(1))
410[warn] | ^
411[warn] | unused explicit parameter
412[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsLastOptionTest.scala:6:10
413[warn] 6 |import ox.*
414[warn] | ^
415[warn] | unused import
416[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsLastTest.scala:5:10
417[warn] 5 |import ox.*
418[warn] | ^
419[warn] | unused import
420[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsMapTest.scala:5:10
421[warn] 5 |import ox.*
422[warn] | ^
423[warn] | unused import
424[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsMapUsingSinkTest.scala:5:10
425[warn] 5 |import ox.*
426[warn] | ^
427[warn] | unused import
428[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsReduceTest.scala:5:10
429[warn] 5 |import ox.*
430[warn] | ^
431[warn] | unused import
432[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSampleTest.scala:5:10
433[warn] 5 |import ox.*
434[warn] | ^
435[warn] | unused import
436[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsScanTest.scala:5:10
437[warn] 5 |import ox.*
438[warn] | ^
439[warn] | unused import
440[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitOnTest.scala:5:10
441[warn] 5 |import ox.*
442[warn] | ^
443[warn] | unused import
444[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitTest.scala:5:10
445[warn] 5 |import ox.*
446[warn] | ^
447[warn] | unused import
448[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsTakeWhileTest.scala:5:10
449[warn] 5 |import ox.*
450[warn] | ^
451[warn] | unused import
452[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsThrottleTest.scala:5:10
453[warn] 5 |import ox.*
454[warn] | ^
455[warn] | unused import
456[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsTimeoutTest.scala:6:10
457[warn] 6 |import ox.*
458[warn] | ^
459[warn] | unused import
460[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsUsingSink.scala:5:10
461[warn] 5 |import ox.*
462[warn] | ^
463[warn] | unused import
464[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsZipAllTest.scala:5:10
465[warn] 5 |import ox.*
466[warn] | ^
467[warn] | unused import
468[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:24:21
469[warn] 24 | def afterAttempt(attempt: Int, result: Either[Throwable, Int]): Unit =
470[warn] | ^^^^^^^
471[warn] | unused explicit parameter
472[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:50:21
473[warn] 50 | def afterAttempt(attempt: Int, result: Either[Throwable, Unit]): Unit =
474[warn] | ^^^^^^^
475[warn] | unused explicit parameter
476[info] [patched file /build/repo/core/src/test/scala/ox/MapParTest.scala]
477[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFlattenTest.scala]
478[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsZipWithIndexTest.scala]
479[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFoldTest.scala]
480[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsCollectTest.scala]
481[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsMapTest.scala]
482[info] [patched file /build/repo/core/src/test/scala/ox/CollectParTest.scala]
483[info] [patched file /build/repo/core/src/test/scala/ox/OxAppTest.scala]
484[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFutureTest.scala]
485[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceTest.scala]
486[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFilterTest.scala]
487[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsTakeWhileTest.scala]
488[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsThrottleTest.scala]
489[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDropTest.scala]
490[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSampleTest.scala]
491[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsEmptyTest.scala]
492[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsScanTest.scala]
493[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala]
494[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsMapUsingSinkTest.scala]
495[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsForeachTest.scala]
496[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsTimeoutTest.scala]
497[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsUsingSink.scala]
498[info] [patched file /build/repo/core/src/test/scala/ox/FilterParTest.scala]
499[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala]
500[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsZipAllTest.scala]
501[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFlatMapTest.scala]
502[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsLastOptionTest.scala]
503[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsConcatPrependTest.scala]
504[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsLastTest.scala]
505[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitOnTest.scala]
506[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceByTest.scala]
507[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitTest.scala]
508[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsReduceTest.scala]
509[warn] 49 warnings found
510[info] done compiling
511[info] compiling 33 Scala sources to /build/repo/core/target/scala-3.7.4/test-classes ...
512[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
513[warn] 43 | def transformation(i: Int) =
514[warn] | ^
515[warn] | unused explicit parameter
516[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
517[warn] 43 | def transformation(i: Int) =
518[warn] | ^
519[warn] | unused explicit parameter
520[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:386:44
521[warn] 386 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
522[warn] | ^
523[warn] | unused implicit parameter
524[warn] three warnings found
525[info] done compiling
526Starting build for ProjectRef(file:/build/repo/,cron) (cron)... [3/6]
527Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
528[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/classes ...
529[info] done compiling
530[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/test-classes ...
531[warn] -- [E198] Unused Symbol Warning: /build/repo/cron/src/test/scala/ox/scheduling/cron/CronScheduleTest.scala:7:33
532[warn] 7 |import scala.concurrent.duration.*
533[warn] | ^
534[warn] | unused import
535[info] [patched file /build/repo/cron/src/test/scala/ox/scheduling/cron/CronScheduleTest.scala]
536[warn] one warning found
537[info] done compiling
538[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/test-classes ...
539[info] done compiling
540Starting build for ProjectRef(file:/build/repo/,otelContext) (otel-context)... [4/6]
541Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
542[info] compiling 1 Scala source to /build/repo/otel-context/target/scala-3.7.4/classes ...
543[info] done compiling
544Starting build for ProjectRef(file:/build/repo/,kafka) (kafka)... [5/6]
545Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
546[info] compiling 9 Scala sources to /build/repo/kafka/target/scala-3.7.4/classes ...
547[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaConsumerWrapper.scala:45:14
548[warn] 45 | def close(wrapper: KafkaConsumerWrapper[K, V]): Unit = if closeWhenComplete then
549[warn] | ^^^^^^^
550[warn] | unused explicit parameter
551[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaFlow.scala:3:41
552[warn] 3 |import org.apache.kafka.clients.consumer.ConsumerRecord
553[warn] | ^^^^^^^^^^^^^^
554[warn] | unused import
555[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/kafkaOffsetCommit.scala]
556[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/KafkaFlow.scala]
557[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/KafkaStage.scala]
558[warn] two warnings found
559[info] done compiling
560[info] compiling 3 Scala sources to /build/repo/kafka/target/scala-3.7.4/classes ...
561[info] done compiling
562[info] compiling 6 Scala sources to /build/repo/kafka/target/scala-3.7.4/test-classes ...
563[info] [patched file /build/repo/kafka/src/test/scala/ox/kafka/KafkaTest.scala]
564[info] done compiling
565[info] compiling 1 Scala source to /build/repo/kafka/target/scala-3.7.4/test-classes ...
566[info] done compiling
567
568************************
569Build summary:
570[{
571 "module": "flow-reactive-streams",
572 "compile": {"status": "ok", "tookMs": 16123, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
573 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
574 "test-compile": {"status": "ok", "tookMs": 6581, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
575 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
576 "publish": {"status": "skipped", "tookMs": 0},
577 "metadata": {
578 "crossScalaVersions": ["2.12.20"]
579}
580},{
581 "module": "mdc-logback",
582 "compile": {"status": "ok", "tookMs": 469, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
583 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
584 "test-compile": {"status": "ok", "tookMs": 894, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
585 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
586 "publish": {"status": "skipped", "tookMs": 0},
587 "metadata": {
588 "crossScalaVersions": ["2.12.20"]
589}
590},{
591 "module": "core",
592 "compile": {"status": "ok", "tookMs": 166, "warnings": 16, "errors": 0, "sourceVersion": "3.7-migration"},
593 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
594 "test-compile": {"status": "ok", "tookMs": 20853, "warnings": 49, "errors": 0, "sourceVersion": "3.7-migration"},
595 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
596 "publish": {"status": "skipped", "tookMs": 0},
597 "metadata": {
598 "crossScalaVersions": ["2.12.20"]
599}
600},{
601 "module": "cron",
602 "compile": {"status": "ok", "tookMs": 418, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
603 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
604 "test-compile": {"status": "ok", "tookMs": 859, "warnings": 1, "errors": 0, "sourceVersion": "3.7-migration"},
605 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
606 "publish": {"status": "skipped", "tookMs": 0},
607 "metadata": {
608 "crossScalaVersions": ["2.12.20"]
609}
610},{
611 "module": "otel-context",
612 "compile": {"status": "ok", "tookMs": 198, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
613 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
614 "test-compile": {"status": "ok", "tookMs": 145, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
615 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
616 "publish": {"status": "skipped", "tookMs": 0},
617 "metadata": {
618 "crossScalaVersions": ["2.12.20"]
619}
620},{
621 "module": "kafka",
622 "compile": {"status": "ok", "tookMs": 777, "warnings": 2, "errors": 0, "sourceVersion": "3.7-migration"},
623 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
624 "test-compile": {"status": "ok", "tookMs": 1409, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
625 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
626 "publish": {"status": "skipped", "tookMs": 0},
627 "metadata": {
628 "crossScalaVersions": ["2.12.20"]
629}
630}]
631************************
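
Aside: each summary entry follows the same per-module schema. A hypothetical Scala model (names mirror the JSON keys; types are guesses from the values shown):

    case class StepResult(status: String, tookMs: Long,
                          warnings: Int = 0, errors: Int = 0)
    case class ModuleSummary(module: String, compile: StepResult,
                             `test-compile`: StepResult,
                             crossScalaVersions: List[String])
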
632[success] Total time: 63 s (0:01:03.0), completed Jan 8, 2026, 1:28:41 AM
633Checking patch project/plugins.sbt...
634Checking patch build.sbt...
635Applied patch project/plugins.sbt cleanly.
636Applied patch build.sbt cleanly.
637Commit migration rewrites
638Switched to a new branch 'opencb/migrate-source-3.7'
639[opencb/migrate-source-3.7 8dd1e5e] Apply Scala compiler rewrites using -source:3.7-migration using Scala 3.7.4
640 43 files changed, 24 insertions(+), 60 deletions(-)
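
Aside: this commit captures the compiler's automatic rewrites. To reproduce the same kind of migration locally (a sketch, assuming a plain sbt build): add the flag pair below, run compile once, then commit the patched sources.

    Compile / scalacOptions ++= Seq("-source:3.7-migration", "-rewrite")
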
641----
642Preparing build for 3.8.0-RC6
643Scala binary version found: 3.8
644Implicitly using source version 3.8
645Scala binary version found: 3.8
646Implicitly using source version 3.8
647Would try to apply common scalacOptions (best-effort, sbt/mill only):
648Append: ,REQUIRE:-source:3.8
649Remove: ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
650----
651Starting build for 3.8.0-RC6
652Execute tests: true
653sbt project found:
654No prepare script found for project softwaremill/ox
655##################################
656Scala version: 3.8.0-RC6
657Targets: com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context
658Project projectConfig: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}
659##################################
660Using extra scalacOptions: ,REQUIRE:-source:3.8
661Filtering out scalacOptions: ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
662[sbt_options] declare -a sbt_options=()
663[process_args] java_version = '21'
664[copyRt] java9_rt = '/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21/rt.jar'
665# Executing command line:
666java
667-Dfile.encoding=UTF-8
668-Dcommunitybuild.scala=3.8.0-RC6
669-Dcommunitybuild.project.dependencies.add=
670-Xmx7G
671-Xms4G
672-Xss8M
673-Dsbt.script=/root/.sdkman/candidates/sbt/current/bin/sbt
674-Dscala.ext.dirs=/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21
675-jar
676/root/.sdkman/candidates/sbt/1.11.5/bin/sbt-launch.jar
677"setCrossScalaVersions 3.8.0-RC6"
678"++3.8.0-RC6 -v"
679"mapScalacOptions ",REQUIRE:-source:3.8,-Wconf:msg=can be rewritten automatically under:s" ",-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e""
680"set every credentials := Nil"
681"excludeLibraryDependency com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}"
682"removeScalacOptionsStartingWith -P:wartremover"
683
684moduleMappings
685"runBuild 3.8.0-RC6 """{"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}""" com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context"
686
687[info] welcome to sbt 1.11.7 (Eclipse Adoptium Java 21)
688[info] loading settings for project repo-build from akka.sbt, plugins.sbt...
689[info] loading project definition from /build/repo/project
690[info] loading settings for project rootProject from build.sbt...
691[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
692[info] set current project to ox (in build file:/build/repo/)
693Execute setCrossScalaVersions: 3.8.0-RC6
694OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC6 in mdcLogback/crossScalaVersions
695OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC6) in mdcLogback/crossScalaVersions
696OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC6 in core/crossScalaVersions
697OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC6) in core/crossScalaVersions
698OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC6 in flowReactiveStreams/crossScalaVersions
699OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC6 in cron/crossScalaVersions
700OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC6) in cron/crossScalaVersions
701OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC6) in flowReactiveStreams/crossScalaVersions
702OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC6 in documentation/crossScalaVersions
703OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC6) in documentation/crossScalaVersions
704OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC6 in rootProject/crossScalaVersions
705OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC6) in rootProject/crossScalaVersions
706OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC6 in otelContext/crossScalaVersions
707OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC6) in otelContext/crossScalaVersions
708OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC6 in kafka/crossScalaVersions
709OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC6) in kafka/crossScalaVersions
710[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
711[info] set current project to ox (in build file:/build/repo/)
712[info] Setting Scala version to 3.8.0-RC6 on 8 projects.
713[info] Switching Scala version on:
714[info] flowReactiveStreams (3.8.0-RC6)
715[info] documentation (3.8.0-RC6)
716[info] cron (3.8.0-RC6)
717[info] * rootProject (3.8.0-RC6)
718[info] mdcLogback (3.8.0-RC6)
719[info] kafka (3.8.0-RC6)
720[info] core (3.8.0-RC6)
721[info] otelContext (3.8.0-RC6)
722[info] Excluding projects:
723[info] Reapplying settings...
724[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
725[info] set current project to ox (in build file:/build/repo/)
726Execute mapScalacOptions: ,REQUIRE:-source:3.8,-Wconf:msg=can be rewritten automatically under:s ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
727[info] Reapplying settings...
728[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
729[info] set current project to ox (in build file:/build/repo/)
730[info] Defining Global / credentials, core / credentials and 6 others.
731[info] The new values will be used by Compile / scalafmtOnly, Global / pgpSelectPassphrase and 63 others.
732[info] Run `last` for details.
733[info] Reapplying settings...
734[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
735[info] set current project to ox (in build file:/build/repo/)
736Execute excludeLibraryDependency: com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}
737[info] Reapplying settings...
738OpenCB::Failed to reapply settings in excludeLibraryDependency: Reference to undefined setting:
739
740 Global / allExcludeDependencies from Global / allExcludeDependencies (CommunityBuildPlugin.scala:331)
741 Did you mean flowReactiveStreams / allExcludeDependencies ?
742 , retry without global scopes
743[info] Reapplying settings...
744[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
745[info] set current project to ox (in build file:/build/repo/)
746Execute removeScalacOptionsStartingWith: -P:wartremover
747[info] Reapplying settings...
748[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
749[info] set current project to ox (in build file:/build/repo/)
750[success] Total time: 0 s, completed Jan 8, 2026, 1:28:52 AM
751Build config: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}
752Parsed config: Success(ProjectBuildConfig(ProjectsConfig(List(),Map()),Full,List()))
753Starting build...
754Projects: Set(flowReactiveStreams, cron, mdcLogback, kafka, core, otelContext)
755Starting build for ProjectRef(file:/build/repo/,flowReactiveStreams) (flow-reactive-streams)... [0/6]
756OpenCB::Exclude Scala3 specific scalacOption `REQUIRE:-source:3.8` in Scala 2.12.20 module Global
757OpenCB::Filter out '-deprecation', matches setting pattern '^-?-deprecation'
758OpenCB::Filter out '-feature', matches setting pattern '^-?-feature'
759Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
760[info] compiling 57 Scala sources to /build/repo/core/target/scala-3.8.0-RC6/classes ...
761[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:187:27
762[warn] 187 | def fromArray[A: ClassTag](array: Array[A]): Chunk[A] =
763[warn] | ^
764[warn] | unused implicit parameter
765[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:190:28
766[warn] 190 | def fromIArray[A: ClassTag](array: IArray[A]): Chunk[A] =
767[warn] | ^
768[warn] | unused implicit parameter
769[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:35:45
770[warn] 35 | def addSuppressedException[T](error: F[T], e: Throwable): F[T] = error
771[warn] | ^
772[warn] | unused explicit parameter
773[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:40:41
774[warn] 40 | def addSuppressedError[T](error: F[T], e: E): F[T] = error
775[warn] | ^
776[warn] | unused explicit parameter
777[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
778[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
779[warn] | ^^^^
780[warn] | unused explicit parameter
781[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:615:8
782[warn] 615 | tap(t => sleep(emitEveryMillis))
783[warn] | ^
784[warn] | unused explicit parameter
785[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:946:53
786[warn] 946 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
787[warn] | ^^^^
788[warn] | unused explicit parameter
789[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowTextOps.scala:149:60
790[warn] 149 | def processByteOrderMark(bytes: T, buffer: Chunk[Byte], output: FlowEmit[String]): (Chunk[Byte], State) =
791[warn] | ^^^^^^
792[warn] | unused explicit parameter
793[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
794[warn] 123 | private var successCalls = 0
795[warn] | ^^^^^^^^^^^^
796[warn] | private variable was mutated but not read
797[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
798[warn] 167 | private var successCalls = 0
799[warn] | ^^^^^^^^^^^^
800[warn] | private variable was mutated but not read
801[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:413:15
802[warn] 413 | case Nested(t) =>
803[warn] | ^
804[warn] |the type test for Nested cannot be checked at runtime because it's a local class
805[warn] |
806[warn] | longer explanation available when compiling with `-explain`
807[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
808[warn] 150 | case FromParent(t) =>
809[warn] | ^
810[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
811[warn] |
812[warn] | longer explanation available when compiling with `-explain`
813[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
814[warn] 154 | case ChildDone(v) =>
815[warn] | ^
816[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
817[warn] |
818[warn] | longer explanation available when compiling with `-explain`
819[warn] 13 warnings found
820[info] done compiling
821[info] compiling 1 Scala source to /build/repo/flow-reactive-streams/target/scala-3.8.0-RC6/classes ...
822[info] done compiling
823Starting build for ProjectRef(file:/build/repo/,mdcLogback) (mdc-logback)... [1/6]
824Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
825[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.8.0-RC6/classes ...
826[info] done compiling
827[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.8.0-RC6/test-classes ...
828[info] done compiling
82901:29:16.014 [pool-28-thread-7] INFO ox.logback.InheritableMDC$ -- Scoped-value based MDC initialized
830[info] InheritableMDCTest:
831[info] - should make MDC values available in forks
832Starting build for ProjectRef(file:/build/repo/,core) (core)... [2/6]
833Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
834[info] compiling 112 Scala sources to /build/repo/core/target/scala-3.8.0-RC6/test-classes ...
835[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
836[warn] 43 | def transformation(i: Int) =
837[warn] | ^
838[warn] | unused explicit parameter
839[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ForeachParTest.scala:38:23
840[warn] 38 | def transformation(i: Int) =
841[warn] | ^
842[warn] | unused explicit parameter
843[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
844[warn] 43 | def transformation(i: Int) =
845[warn] | ^
846[warn] | unused explicit parameter
847[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ParTest.scala:80:21
848[warn] 80 | (1 to 5).map(i =>
849[warn] | ^
850[warn] | unused explicit parameter
851[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:126:41
852[warn] 126 | use(new TestResource, _.release()) { r =>
853[warn] | ^
854[warn] | unused explicit parameter
855[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:140:37
856[warn] 140 | useCloseable(new TestResource) { r =>
857[warn] | ^
858[warn] | unused explicit parameter
859[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:157:43
860[warn] 157 | use(new TestResource, _.release()) { r =>
861[warn] | ^
862[warn] | unused explicit parameter
863[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala:5:19
864[warn] 5 |import ox.{timeout as _, *}
865[warn] | ^^^^^^^^^^^^
866[warn] | unused import
867[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:5:19
868[warn] 5 |import ox.{timeout as _, *}
869[warn] | ^^^^^^^^^^^^
870[warn] | unused import
871[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:386:44
872[warn] 386 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
873[warn] | ^
874[warn] | unused implicit parameter
875[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:16:27
876[warn] 16 | .groupBy(10, _ % 10)(v => f => f)
877[warn] | ^
878[warn] | unused explicit parameter
879[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:22:44
880[warn] 22 | Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
881[warn] | ^
882[warn] | unused explicit parameter
883[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:28:68
884[warn] 28 | for i <- 1 to 100000 do Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
885[warn] | ^
886[warn] | unused explicit parameter
887[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:109:26
888[warn] 109 | .groupBy(1, _ => 0)(v => _.tap(_ => sleep(10.millis)))
889[warn] | ^
890[warn] | unused explicit parameter
891[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:119:31
892[warn] 119 | .groupBy(10, _ % 10)(v => f => f.tap(i => if i == 13 then throw new RuntimeException("boom!")))
893[warn] | ^
894[warn] | unused explicit parameter
895[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:131:30
896[warn] 131 | .groupBy(1, _ => 0)(v => f => f.tap(_ => sleep(100.millis).tap(_ => throw new RuntimeException("boom!"))))
897[warn] | ^
898[warn] | unused explicit parameter
899[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:142:31
900[warn] 142 | .groupBy(10, _ % 10)(v => f => f)
901[warn] | ^
902[warn] | unused explicit parameter
903[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:153:29
904[warn] 153 | .groupBy(10, _ % 10)(v => f => f.take(1))
905[warn] | ^
906[warn] | unused explicit parameter
907[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:24:21
908[warn] 24 | def afterAttempt(attempt: Int, result: Either[Throwable, Int]): Unit =
909[warn] | ^^^^^^^
910[warn] | unused explicit parameter
911[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:50:21
912[warn] 50 | def afterAttempt(attempt: Int, result: Either[Throwable, Unit]): Unit =
913[warn] | ^^^^^^^
914[warn] | unused explicit parameter
915[warn] 20 warnings found
916[info] done compiling
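The E198 warnings above all follow two patterns. A minimal sketch (editorial, not from the ox sources) of the usual fixes: drop the parameter name in a lambda, or keep the name and mark it with scala.annotation.unused.

import scala.annotation.unused

// Lambda case, as in `.groupBy(10, _ % 10)(v => f => f)` above: replacing
// the unused `v` with `_` silences the warning.
val handler: Int => (String => String) = _ => f => f

// Named-parameter case, as in AfterAttemptTest's afterAttempt: the name is
// kept for readability and marked as intentionally unused.
def afterAttempt(@unused attempt: Int, result: Either[Throwable, Int]): Unit =
  println(result)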
917[info] AfterAttemptTest:
918[info] RetryPolicy afterAttempt callback
919[info] - should retry a succeeding function with afterAttempt callback
920[info] - should retry a failing function with afterAttempt callback
921[info] SourceOpsTest:
922[info] - should pipe one source to another
923[info] - should pipe one source to another (with done propagation)
924[info] - should tap over a source
925[info] FlowOpsReduceTest:
926[info] reduce
927[info] - should throw NoSuchElementException for reduce over the empty source
928[info] - should throw exception thrown in `f` when `f` throws
929[info] - should return first element from reduce over the single element source
930[info] - should run reduce over a non-empty source
931[info] ExceptionTest:
932[info] unsupervised
933[2026-01-08T00:29:39.983915974Z] [24] CustomException
934[info] - should throw the exception thrown by a joined fork
935[info] supervised
936[2026-01-08T00:29:39.998111998Z] [24] CustomException
937[info] - should throw the exception thrown in the scope
938[2026-01-08T00:29:40.001526936Z] [24] CustomException(suppressed=ExecutionException)
939[info] - should retain the original exception for context, as suppressed
940[2026-01-08T00:29:40.005204904Z] [24] CustomException
941[info] - should throw the exception thrown by a failing fork
942[2026-01-08T00:29:40.110214756Z] [24] CustomException(suppressed=ExecutionException,InterruptedException,InterruptedException)
943[info] - should interrupt other forks when there's a failure, add suppressed interrupted exceptions
944[2026-01-08T00:29:40.213822940Z] [24] CustomException(suppressed=ExecutionException,CustomException2)
945[info] - should interrupt other forks when there's a failure, add suppressed custom exceptions
946[2026-01-08T00:29:40.216898827Z] [24] CustomException(suppressed=ExecutionException,InterruptedException)
947[info] - should not add the original exception as suppressed
948[2026-01-08T00:29:40.223134377Z] [24] CustomException(suppressed=ExecutionException,CustomException3)
949[info] - should add an exception as suppressed, even if it wraps the original exception
950[info] joinEither
951[info] - should catch the exception with which a fork ends
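The ExceptionTest output above shows ox's error-handling contract for scopes: the first failure is rethrown as-is, and interruptions of sibling forks show up as suppressed exceptions. A minimal sketch of that behaviour, assuming ox's documented supervised/fork API:

import ox.{fork, sleep, supervised}
import scala.concurrent.duration.*

@main def exceptionDemo(): Unit =
  try
    supervised {
      fork { sleep(1.second) }                          // sibling: will be interrupted
      fork { throw new RuntimeException("boom") }.join()
    }
  catch
    case e: RuntimeException =>
      println(s"caught: ${e.getMessage}; suppressed: ${e.getSuppressed.toList}")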
952[info] ScheduleFallingBackRetryTest:
953[info] retry with combination of schedules
954[info] - should retry 3 times immediately and then 2 times with delay
955[info] - should retry forever
956[info] DelayedRetryTest:
957[info] Delayed retry
958[info] - should retry a function
959[info] - should retry a failing function forever
960[info] - should retry an Either
961[info] adaptive retry with delayed config
962[info] - should retry a failing function forever or until adaptive retry blocks it
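The retry suites above exercise ox's resilience module. A hedged sketch of a delayed retry, assuming the retry/RetryConfig helpers shown in ox's resilience docs (factory names differ slightly across ox versions):

import ox.resilience.{retry, RetryConfig}
import scala.concurrent.duration.*

def flaky(): Int =
  if math.random() < 0.5 then throw new RuntimeException("transient") else 42

// Up to 3 retries, sleeping 100ms between attempts; the first success wins.
val result: Int = retry(RetryConfig.delay(3, 100.millis))(flaky())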
963[info] CircuitBreakerTest:
964[info] Circuit Breaker run operations
965[info] - should run operation when metrics are not exceeded
966[info] - should drop operation after exceeding failure threshold
967[info] - should drop operation after exceeding slow call threshold
968[info] Circuit Breaker scheduled state changes
969[info] - should switch to halfOpen after configured time
970[info] - should switch back to open after configured timeout in half open state
971[info] - should correctly transition through states when there are concurrently running operations
972[info] - should correctly calculate metrics when results come in after state change
973[info] FlowOpsTakeWhileTest:
974[info] takeWhile
975[info] - should not take from the empty flow
976[info] - should take as long as predicate is satisfied
977[info] - should take the failed element if includeFirstFailing = true
978[info] - should work if all elements match the predicate
979[info] - should fail the source with the same exception as the initial source
980[info] - should not take if predicate fails for first or more elements
981[info] FlowOpsInterleaveAllTest:
982[info] interleaveAll
983[info] - should interleave no sources
984[info] - should interleave a single flow
985[info] - should interleave multiple flows
986[info] - should interleave multiple flows using custom segment size
987[info] - should interleave multiple flows using custom segment size and complete eagerly
988[info] AppErrorTest:
989[info] supervisedError
990[info] - should return the app error from the main body
991[info] - should return success from the main body
992[info] - should return the app error returned by a failing fork
993[info] - should return success from the main body if a fork is successful
994[info] - should interrupt other forks if one fails
995[info] ChunkTest:
996[info] Chunk
997[info] - should create empty chunks
998[info] - should create chunks from arrays
999[info] - should create chunks from IArrays
1000[info] - should create chunks from elements
1001[info] - should create empty chunks from empty arrays
1002[info] - should support random access
1003[info] - should throw IndexOutOfBoundsException for invalid indices
1004[info] - should support iteration
1005[info] - should support foreach operations
1006[info] - should concatenate two non-empty chunks efficiently
1007[info] - should handle concatenation with empty chunks
1008[info] - should support chained concatenation
1009[info] - should concatenate chunks of different types
1010[info] - should concatenate non-empty chunk with non-chunk collections
1011[info] - should concatenate empty chunk with non-chunk collections
1012[info] - should handle concatenation with empty collections
1013[info] - should support drop operations
1014[info] - should support take operations
1015[info] - should handle drop/take on concatenated chunks
1016[info] - should support map operations
1017[info] - should support filter operations
1018[info] - should support collect operations
1019[info] - should convert to arrays correctly
1020[info] - should convert concatenated chunks to arrays correctly
1021[info] - should convert byte chunks to strings
1022[info] - should convert concatenated byte chunks to strings
1023[info] - should provide access to backing arrays
1024[info] - should allow efficient processing via backing arrays
1025[info] - should handle operations on empty chunks
1026[info] - should maintain consistency between single and multi-array chunks
1027[info] - should handle large chunks efficiently
1028[info] - should support indexWhere on single chunks
1029[info] - should support indexWhere on concatenated chunks
1030[info] - should handle indexWhere on empty chunks
1031[info] - should handle indexWhere edge cases with concatenated chunks
1032[info] - should support contains and exists operations
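The ChunkTest list above sketches the Chunk API. A short illustrative example, with method names inferred from the test descriptions (treat them as assumptions, not a verified API):

import ox.Chunk

val a = Chunk.fromArray(Array(1, 2, 3))
val b = Chunk.fromArray(Array(4, 5))
val c = a ++ b                  // concatenation; both backing arrays are kept
val firstFour = c.take(4)       // Chunk(1, 2, 3, 4)
val idx = c.indexWhere(_ > 3)   // 3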
1033[info] FlowOpsFoldTest:
1034[info] fold
1035[info] - should throw an exception for a failed flow
1036[info] - should throw exception thrown in `f` when `f` throws
1037[info] - should return `zero` value from fold on the empty source
1038[info] - should return fold on non-empty source
1039[info] FlowOpsFilterTest:
1040[info] filter
1041[info] - should not filter anything from the empty flow
1042[info] - should filter out everything if no element meets 'f'
1043[info] - should not filter anything if all the elements meet 'f'
1044[info] - should filter out elements that don't meet 'f'
1045[info] FlowOpsMapUsingSinkTest:
1046[info] mapUsingSink
1047[info] - should map over a source, using emit
1048[info] - should propagate errors
1049[info] FlowOpsCollectTest:
1050[info] collect
1051[info] - should collect over a source
1052[info] FlowOpsGroupedTest:
1053[info] grouped
1054[info] - should emit grouped elements
1055[info] - should emit grouped elements and include remaining values when flow closes
1056[info] - should return failed flow when the original flow is failed
1057[info] groupedWeighted
1058[info] - should emit grouped elements with custom cost function
1059[info] - should return failed flow when cost function throws exception
1060[info] - should return failed source when the original source is failed
1061[info] groupedWithin
1062[info] - should group first batch of elements due to limit and second batch due to timeout
1063[info] - should group first batch of elements due to timeout and second batch due to limit
1064[info] - should wake up on new element and send it immediately after first batch is sent and channel goes to time-out mode
1065[info] - should send the group only once when the channel is closed
1066[info] - should return failed source when the original source is failed
1067[info] groupedWeightedWithin
1068[info] - should group elements on timeout in the first batch and consider max weight in the remaining batches
1069[info] - should return failed source when cost function throws exception
1070[info] - should return failed source when the original source is failed
1071[info] MapParTest:
1072[info] mapPar
1073[info] - should output the same type as input
1074[info] - should run computations in parallel
1075[info] - should run not more computations than limit
1076[2026-01-08T00:30:01.150086623Z] [386] exception
1077[2026-01-08T00:30:01.152795537Z] [24] catch
1078[2026-01-08T00:30:01.453016337Z] [24] all done
1079[info] - should interrupt other computations if one fails
1080[info] RateLimiterInterfaceTest:
1081[info] RateLimiter interface
1082[info] - should drop or block operation depending on method used for fixed rate algorithm
1083[info] - should drop or block operation depending on method used for sliding window algorithm
1084[info] - should drop or block operation depending on method used for bucket algorithm
1085[info] - should drop or block operation concurrently
1086[info] UtilTest:
1087[info] discard
1088[2026-01-08T00:30:07.482480271Z] [24] in f
1089[info] - should do nothing
1090[info] tapException
1091[2026-01-08T00:30:07.483619812Z] [24] in callback: boom!
1092[2026-01-08T00:30:07.483783818Z] [24] in catch: boom!
1093[2026-01-08T00:30:07.484445349Z] [24] 42
1094[2026-01-08T00:30:07.484515490Z] [24] after
1095[2026-01-08T00:30:07.487333827Z] [24] in catch: boom! 1
1096[info] - should run the callback when an exception is thrown
1097[info] - should not run the callback when no exception is thrown
1098[info] - should suppress any additional exceptions
1099[info] pipe
1100[info] - should work
1101[info] tap
1102[2026-01-08T00:30:07.488868154Z] [24] Adding
1103[2026-01-08T00:30:07.489966940Z] [24] Got: 3
1104[info] - should work
1105[info] debug as extension
1106some label: 10
1107[info] - should work
1108[info] debug as top-level method
1109x.+(1) = 11
1110[info] - should work
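UtilTest above covers ox's small utility extensions. A sketch of typical usage (pipe, tap and discard are documented ox utilities; the example itself is illustrative):

import ox.*

@main def utilDemo(): Unit =
  def compute(): String = "ignored"
  val n = 5
    .pipe(_ + 1)                    // transform and pass the value along: 6
    .tap(v => println(s"Got: $v"))  // side effect; the value is unchanged
  compute().discard                 // explicitly ignore a result
  println(n)                        // 6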
1111[info] FlowOpsLastTest:
1112[info] last
1113[info] - should throw NoSuchElementException for the empty source
1114[info] - should throw ChannelClosedException.Error with the exception and message that were thrown during retrieval
1115[info] - should return last element for the non-empty source
1116[info] FlowOpsFailedTest:
1117[info] failed
1118[info] - should fail on receive
1119[info] FlowOpsFlattenTest:
1120[info] flatten
1121[info] - should flatten nested flows
1122[info] WeightedHeapTest:
1123[info] WeightedHeap
1124[info] - should allow inserting elements with weights
1125[info] - should allow extracting the minimum element
1126[info] - should return None when extracting from an empty heap
1127[info] - should return the correct size after operations
1128[info] - should handle empty heaps correctly
1129[info] - should update the weight of an existing element and adjust its position
1130[info] - should throw an exception when updating the weight of a non-existent element
1131[info] - should handle multiple insertions and updates correctly
1132[info] - should handle duplicate insertions by updating the existing element's weight
1133[info] - should handle increasing the weight of an existing element
1134[info] - should maintain heap property after multiple weight increases
1135[info] - should work correctly when increasing the weight of the current minimum element
1136[info] - should handle increasing weights in a large heap
1137[info] - should maintain the heap property after multiple operations
1138[info] - should work with large numbers of elements
1139[info] - should maintain heap property with random insertions and extractions
1140[info] - should maintain heap property with random weight updates
1141[info] FlowOpsMapConcatTest:
1142[info] mapConcat
1143[info] - should unfold iterables
1144[info] - should transform elements
1145[info] - should handle empty lists
1146[info] - should propagate errors in the mapping function
1147[info] FlowOpsPipeToTest:
1148[info] - should pipe one source to another
1149[info] - should pipe one source to another (with done propagation)
1150[info] FlowOpsRecoverTest:
1151[info] Flow.recover
1152[info] - should pass through elements when upstream flow succeeds
1153[info] - should emit recovery value when upstream flow fails with handled exception
1154[info] - should not emit recovery value when downstream flow fails with handled exception
1155[info] - should propagate unhandled exceptions
1156[info] - should handle multiple exception types
1157[info] - should work with different recovery value type
1158[info] - should handle exception thrown during flow processing
1159[info] - should work with empty flow
1160[info] - should propagate exception when partial function throws
1161[info] FlowOpsMapStatefulTest:
1162[info] mapStateful
1163[info] - should zip with index
1164[info] - should calculate a running total
1165[info] - should be able to emit different values than incoming ones
1166[info] - should propagate errors in the mapping function
1167[info] - should propagate errors in the completion callback
1168[info] FlowOpsMapTest:
1169[info] map
1170[info] - should map over a source
1171[info] - should map over a source using for-syntax
1172[info] ForkTest:
1173[2026-01-08T00:30:07.691613795Z] [24] main mid
1174[info] fork
1175[2026-01-08T00:30:08.191987488Z] [435] f1 complete
1176[2026-01-08T00:30:08.692068497Z] [436] f2 complete
1177[2026-01-08T00:30:08.692511392Z] [24] result = 11
1178[info] - should run two forks concurrently
1179[2026-01-08T00:30:08.695583990Z] [438] f2 complete
1180[2026-01-08T00:30:08.695746122Z] [437] f1 complete
1181[2026-01-08T00:30:08.696200761Z] [24] result = 11
1182[info] - should allow nested forks
1183[2026-01-08T00:30:08.697613752Z] [24] main mid
1184[2026-01-08T00:30:09.198180324Z] [439] f1 complete
1185[2026-01-08T00:30:09.198523076Z] [24] result = 5
1186[2026-01-08T00:30:09.198964635Z] [440] f2 interrupted
1187[info] - should interrupt child forks when parents complete
1188[2026-01-08T00:30:09.202101225Z] [444] in fork
1189[info] - should allow starting forks within a forkCancellable body, using the outer scope
1190[2026-01-08T00:30:09.303524648Z] [447] in fork
1191[info] - should allow starting forks in outer scope, from an inner scope
1192[2026-01-08T00:30:09.305283584Z] [449] IllegalStateException
1193[info] - should not allow starting forks from a thread created not by the scope
1194[info] FlowOpsIntersperseTest:
1195[info] Flow.intersperse
1196[info] - should intersperse with inject only over an empty source
1197[info] - should intersperse with inject only over a source with one element
1198[info] - should intersperse with inject only over a source with multiple elements
1199[info] - should intersperse with start, inject and end over an empty source
1200[info] - should intersperse with start, inject and end over a source with one element
1201[info] - should intersperse with start, inject and end over a source with multiple elements
1202[info] FlowOpsScanTest:
1203[info] scan
1204[info] - should scan the empty flow
1205[info] - should scan a flow of summed Int
1206[info] - should scan a flow of multiplied Int
1207[info] - should scan a flow of concatenated String
1208[info] FlowOpsUsingSinkTest:
1209[info] usingSink
1210[info] - should send the passed elements
1211[info] FlowOpsTakeTest:
1212[info] take
1213[info] - should take from a simple flow
1214[info] - should take from an async flow
1215[info] - should take all if the flow ends sooner than the desired number of elements
1216[info] EitherTest:
1217[info] either
1218[info] - should work correctly when invoked on eithers
1219[info] - should work correctly when invoked on options
1220[info] - should work correctly when invoked on fork
1221[info] - should report a proper compilation error when used outside of either:
1222[info] - should report a proper compilation error when wrong error type is used for ok() (explicit type params)
1223[info] - should report a proper compilation error when wrong successful type is used (explicit type params)
1224[info] - should report a proper compilation error when wrong type annotation is used for ok() (error)
1225[info] - should report a proper compilation error when wrong type annotation is used (success)
1226[info] - should report a proper compilation error when wrong error type is used for fail() (explicit type params)
1227[info] - should report a proper compilation error when wrong type annotation is used for fail() (error)
1228[info] - should catch non fatal exceptions
1229[info] - should not catch fatal exceptions
1230[info] - should provide an either scope when catching non fatal exceptions
1231[info] - should report a proper compilation error when wrong error type is used for ok() in catchingNonFatal block
1232[info] - should work when combined with mapPar
1233[info] - should not allow nesting of eithers
1234[info] orThrow
1235[info] - should unwrap the value for a Right-value
1236[info] - should throw exceptions for a Left-value
1237[info] catching
1238[info] - should catch given exceptions only
1239[info] - should catch parent exceptions
1240[info] - should not catch non-given exceptions
1241[info] - should not catch fatal exceptions
1242[info] - should return successful results as Right-values
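EitherTest above exercises ox's either blocks. A sketch of the documented usage: inside either:, .ok() unwraps successful values and .fail() short-circuits with an error (import paths assumed from the ox docs):

import ox.either
import ox.either.{fail, ok}

def parsePort(s: String): Either[String, Int] =
  s.toIntOption.toRight(s"not a number: $s")

val port: Either[String, Int] = either:
  val p = parsePort("8080").ok()                  // unwraps the Right value
  if p > 0 && p < 65536 then p else "out of range".fail()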
1243[info] FlowIOOpsTest:
1244[info] asInputStream
1245[info] - should return an empty InputStream for an empty source
1246[info] - should return an InputStream for a simple source
1247[info] - should correctly track available bytes
1248[info] - should support bulk read operations with read(byte[])
1249[info] - should handle bulk read operations across multiple chunks
1250[info] - should handle bulk read with concatenated chunks (multiple backing arrays)
1251[info] - should handle read(byte[], offset, length) with various parameters
1252[info] - should handle edge cases for read(byte[], offset, length)
1253[info] - should throw appropriate exceptions for invalid read parameters
1254[info] - should maintain consistency between single-byte and bulk reads
1255[info] - should handle chunks with empty backing arrays
1256[info] - should handle flow with only empty chunks
1257[info] - should handle mixed empty and non-empty chunks in flow
1258[info] toOutputStream
1259[info] - should write a single chunk with bytes to an OutputStream
1260[info] - should write multiple chunks with bytes to an OutputStream
1261[info] - should write concatenated chunks to an OutputStream
1262[info] - should handle an empty Source
1263[info] - should close the OutputStream on write error
1264[info] - should close the OutputStream on error
1265[info] toFile
1266[info] - should open existing file and write a single chunk with bytes
1267[info] - should open existing file and write multiple chunks with bytes
1268[info] - should create file and write multiple chunks with bytes
1269[info] - should write concatenated chunks to a file
1270[info] - should use an existing file and overwrite it with a single chunk with bytes
1271[info] - should handle an empty source
1272[info] - should throw an exception on failing Source
1273[info] - should throw an exception if path is a directory
1274[info] - should throw an exception if file cannot be opened
1275[info] FlowOpsConcatPrependTest:
1276[info] concat
1277[info] - should concat other source
1278[info] prepend
1279[info] - should prepend other source
1280[info] FlowTextOpsTest:
1281[info] linesUtf8
1282[info] - should split a single chunk of bytes into lines
1283[info] - should split a single chunk of bytes into lines (multiple newlines)
1284[info] - should split a single chunk of bytes into lines (beginning with newline)
1285[info] - should split a single chunk of bytes into lines (ending with newline)
1286[info] - should split a single chunk of bytes into lines (empty array)
1287[info] - should split a multiple chunks of bytes into lines
1288[info] - should split a multiple chunks of bytes into lines (multiple newlines)
1289[info] - should split a multiple chunks of bytes into lines (multiple empty chunks)
1290[info] lines(charset)
1291zażółć
1292gęślą
1293jaźń
1294[info] - should decode lines with specified charset
1295[info] - should decode lines correctly across chunk boundaries
1296[info] decodeStringUtf8
1297[info] - should decode a simple string
1298[info] - should decode a chunked string with UTF-8 multi-byte characters
1299[info] - should handle an empty Source
1300[info] - should handle partial BOM
1301[info] - should handle a string shorter than BOM
1302[info] - should handle empty chunks
1303[info] encodeUtf8
1304[info] - should handle empty String
1305[info] - should encode a string
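FlowTextOpsTest above covers text decoding on flows of byte chunks. A sketch of splitting bytes into lines (Chunk.fromArray is inferred from the ChunkTest descriptions above):

import ox.Chunk
import ox.flow.Flow

val lines: List[String] = Flow
  .fromValues(Chunk.fromArray("zażółć\ngęślą\njaźń\n".getBytes("UTF-8")))
  .linesUtf8
  .runToList()                      // List("zażółć", "gęślą", "jaźń")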
1306[info] FlowOpsZipWithIndexTest:
1307[info] zipWithIndex
1308[info] - should not zip anything from an empty flow
1309[info] - should zip flow with index
1310[info] ResourceTest:
1311[info] useInScope
1312[2026-01-08T00:30:09.558667252Z] [24] allocate
1313[2026-01-08T00:30:09.559497420Z] [550] release 1
1314[info] - should release resources after allocation
1315[2026-01-08T00:30:09.561266663Z] [24] allocate 1
1316[2026-01-08T00:30:09.561770561Z] [24] allocate 2
1317[2026-01-08T00:30:09.563092353Z] [551] release 2
1318[2026-01-08T00:30:09.563265896Z] [551] release 1
1319[info] - should release resources in reverse order
1320[2026-01-08T00:30:09.565531116Z] [24] allocate 1
1321[2026-01-08T00:30:09.566013945Z] [24] allocate 2
1322[2026-01-08T00:30:09.566578113Z] [552] release 2
1323[2026-01-08T00:30:09.566697395Z] [552] release 1
1324[2026-01-08T00:30:09.567156790Z] [24] exception
1325[info] - should release resources when there's an exception
1326[2026-01-08T00:30:09.568392473Z] [24] allocate 1
1327[2026-01-08T00:30:09.568851896Z] [24] allocate 2
1328[2026-01-08T00:30:09.569412673Z] [553] release 2
1329[2026-01-08T00:30:09.569775516Z] [553] release 1
1330[info] - should release resources when there's an exception during releasing (normal result)
1331[2026-01-08T00:30:09.570218420Z] [24] exception e2
1332[2026-01-08T00:30:09.571355390Z] [24] allocate 1
1333[2026-01-08T00:30:09.572191938Z] [24] allocate 2
1334[2026-01-08T00:30:09.572719159Z] [554] release 2
1335[2026-01-08T00:30:09.572854631Z] [554] release 1
1336[2026-01-08T00:30:09.573698212Z] [24] exception e3
1337[info] - should release resources when there's an exception during releasing (exceptional result)
1338[2026-01-08T00:30:09.575334916Z] [24] in scope
1339[2026-01-08T00:30:09.575649833Z] [555] release
1340[info] - should release registered resources
1341[2026-01-08T00:30:09.576690781Z] [24] allocate
1342[2026-01-08T00:30:09.576789575Z] [24] in scope
1343[2026-01-08T00:30:09.577781221Z] [556] release
1344[info] - should use a resource
1345[2026-01-08T00:30:09.579173643Z] [24] allocate
1346[2026-01-08T00:30:09.579244906Z] [24] in scope
1347[2026-01-08T00:30:09.579859861Z] [557] release
1348[info] - should use a closeable resource
1349[2026-01-08T00:30:09.581682935Z] [24] allocate
1350[2026-01-08T00:30:09.581784455Z] [24] in scope
1351[2026-01-08T00:30:09.582966719Z] [558] release
1352[2026-01-08T00:30:09.583876382Z] [24] exception e2 (e1)
1353[info] - should add suppressed exception when there's an exception during releasing
1354[info] FlowOpsMapParTest:
1355[info] mapPar
1356[info] - should map over a flow with parallelism limit 1
1357[info] - should map over a flow with parallelism limit 2
1358[info] - should map over a flow with parallelism limit 3
1359[info] - should map over a flow with parallelism limit 4
1360[info] - should map over a flow with parallelism limit 5
1361[info] - should map over a flow with parallelism limit 6
1362[info] - should map over a flow with parallelism limit 7
1363[info] - should map over a flow with parallelism limit 8
1364[info] - should map over a flow with parallelism limit 9
1365[info] - should map over a flow with parallelism limit 10
1366[info] - should map over a flow with parallelism limit 10 (stress test)
1367[info] + iteration 1
1368[info] + iteration 2
1369[info] + iteration 3
1370[info] + iteration 4
1371[info] + iteration 5
1372[info] + iteration 6
1373[info] + iteration 7
1374[info] + iteration 8
1375[info] + iteration 9
1376[info] + iteration 10
1377[info] + iteration 11
1378[info] + iteration 12
1379[info] + iteration 13
1380[info] + iteration 14
1381[info] + iteration 15
1382[info] + iteration 16
1383[info] + iteration 17
1384[info] + iteration 18
1385[info] + iteration 19
1386[info] + iteration 20
1387[info] + iteration 21
1388[info] + iteration 22
1389[info] + iteration 23
1390[info] + iteration 24
1391[info] + iteration 25
1392[info] + iteration 26
1393[info] + iteration 27
1394[info] + iteration 28
1395[info] + iteration 29
1396[info] + iteration 30
1397[info] + iteration 31
1398[info] + iteration 32
1399[info] + iteration 33
1400[info] + iteration 34
1401[info] + iteration 35
1402[info] + iteration 36
1403[info] + iteration 37
1404[info] + iteration 38
1405[info] + iteration 39
1406[info] + iteration 40
1407[info] + iteration 41
1408[info] + iteration 42
1409[info] + iteration 43
1410[info] + iteration 44
1411[info] + iteration 45
1412[info] + iteration 46
1413[info] + iteration 47
1414[info] + iteration 48
1415[info] + iteration 49
1416[info] + iteration 50
1417[info] + iteration 51
1418[info] + iteration 52
1419[info] + iteration 53
1420[info] + iteration 54
1421[info] + iteration 55
1422[info] + iteration 56
1423[info] + iteration 57
1424[info] + iteration 58
1425[info] + iteration 59
1426[info] + iteration 60
1427[info] + iteration 61
1428[info] + iteration 62
1429[info] + iteration 63
1430[info] + iteration 64
1431[info] + iteration 65
1432[info] + iteration 66
1433[info] + iteration 67
1434[info] + iteration 68
1435[info] + iteration 69
1436[info] + iteration 70
1437[info] + iteration 71
1438[info] + iteration 72
1439[info] + iteration 73
1440[info] + iteration 74
1441[info] + iteration 75
1442[info] + iteration 76
1443[info] + iteration 77
1444[info] + iteration 78
1445[info] + iteration 79
1446[info] + iteration 80
1447[info] + iteration 81
1448[info] + iteration 82
1449[info] + iteration 83
1450[info] + iteration 84
1451[info] + iteration 85
1452[info] + iteration 86
1453[info] + iteration 87
1454[info] + iteration 88
1455[info] + iteration 89
1456[info] + iteration 90
1457[info] + iteration 91
1458[info] + iteration 92
1459[info] + iteration 93
1460[info] + iteration 94
1461[info] + iteration 95
1462[info] + iteration 96
1463[info] + iteration 97
1464[info] + iteration 98
1465[info] + iteration 99
1466[info] + iteration 100
1467[info] - should propagate errors
1468[2026-01-08T00:30:18.212907268Z] [1916] done
1469[2026-01-08T00:30:18.212907237Z] [1917] done
1470[2026-01-08T00:30:18.313430853Z] [1919] exception
1471[info] - should cancel other running forks when there's an error
1472[info] - should handle empty flow
1473[info] - should handle flow with exactly parallelism number of elements
1474[info] - should handle flow with less than parallelism number of elements
1475[info] - should preserve order even with varying processing times
1476[info] - should preserve order with random processing times
1477[info] - should work with very high parallelism values
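FlowOpsMapParTest above checks that Flow.mapPar runs at most `parallelism` transformations concurrently while preserving input order. A sketch of that contract (Flow.fromIterable is assumed; fromValues is confirmed by the other suites):

import ox.flow.Flow
import ox.sleep
import scala.concurrent.duration.*
import scala.util.Random

val out = Flow
  .fromIterable(1 to 10)
  .mapPar(4)(i => { sleep(Random.nextInt(50).millis); i * 2 })
  .runToList()                      // List(2, 4, ..., 20); order is preserved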
1478[info] SelectOrClosedWithinTest:
1479[info] selectOrClosedWithin
1480[info] - should select a clause that can complete immediately
1481[info] - should return timeout when no clause can complete within the timeout
1482[info] - should select a source that has a value immediately
1483[info] - should return timeout when no source has a value within the timeout
1484[info] - should work with different timeout value types
1485[info] - should handle empty clauses sequence
1486[info] - should handle empty sources sequence
1487[info] selectOrClosedWithin with single clause
1488[info] - should complete when clause is ready
1489[info] - should timeout when clause is not ready
1490[info] selectOrClosedWithin with multiple clauses
1491[info] - should select the first ready clause
1492[info] - should timeout when no clauses are ready
1493[info] selectOrClosedWithin with sources
1494[info] - should select from ready source
1495[info] - should timeout when no sources are ready
1496[info] selectOrClosedWithin error scenarios
1497[info] - should handle channel closed with done
1498[info] - should handle channel closed with error
1499[info] - should prioritize ready channels over closed ones
1500[info] selectOrClosedWithin with different timeout types
1501[info] - should work with various timeout value types
1502[info] selectOrClosedWithin with sequences
1503[info] - should handle empty sequences
1504[info] - should handle sequence of clauses
1505[info] - should handle sequence of sources
1506[info] selectOrClosedWithin with various arities
1507[info] - should work with all supported clause counts
1508[info] - should work with all supported source counts
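The selectOrClosedWithin suites above build on ox's channel selection. A sketch using the basic documented select; the *Within variants tested above add a timeout value but are assumed to use the same clause shape:

import ox.*
import ox.channels.*

@main def selectDemo(): Unit =
  supervised {
    val c1 = Channel.buffered[Int](1)
    val c2 = Channel.buffered[Int](1)
    c2.send(42)
    select(c1.receiveClause, c2.receiveClause) match
      case c1.Received(v) => println(s"from c1: $v")
      case c2.Received(v) => println(s"from c2: $v")
  }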
1509[info] FlowOpsSplitTest:
1510[info] split
1511[info] - should split an empty flow
1512[info] - should split a flow with no delimiters
1513[info] - should split a flow with delimiter at the beginning
1514[info] - should split a flow with delimiter at the end
1515[info] - should split a flow with delimiter in the middle
1516[info] - should split a flow with multiple delimiters
1517[info] - should split a flow with adjacent delimiters
1518[info] - should split a flow with only delimiters
1519[info] - should split a flow with single delimiter
1520[info] - should split a flow with single non-delimiter
1521[info] - should split a flow with multiple consecutive delimiters at the beginning
1522[info] - should split a flow with multiple consecutive delimiters at the end
1523[info] - should split a flow with string delimiters
1524[info] - should split a flow using complex predicate
1525[info] - should handle error propagation
1526[info] - should split a large flow efficiently
1527[info] JitterTest:
1528[info] Jitter
1529[info] - should use no jitter
1530[info] - should use full jitter
1531[info] - should use equal jitter
1532[info] - should use decorrelated jitter
1533[info] FlowOpsAlsoToTest:
1534[info] alsoTo
1535[info] - should send to both sinks
1536[info] - should send to both sinks and not hang when other sink is rendezvous channel
1537[info] - should close main flow when other closes
1538[info] - should close main flow with error when other errors
1539[info] - should close other channel with error when main errors
1540[info] FlowOpsBufferTest:
1541[info] buffer
1542[info] - should work with a single async boundary
1543[info] - should work with multiple async boundaries
1544[info] - should propagate errors
1545[info] BackoffRetryTest:
1546[info] Backoff retry
1547[info] - should retry a function
1548[info] - should retry a failing function forever
1549[info] - should respect maximum delay
1550[info] - should use jitter
1551[info] - should retry an Either
1552[info] FlowOpsEnsureTest:
1553[info] ensure.onComplete
1554[info] - should run in case of success
1555[info] - should run in case of error
1556[info] ensure.onDone
1557[info] - should run in case of success
1558[info] - should not run in case of error
1559[info] ensure.onError
1560[info] - should not run in case of success
1561[info] - should run in case of error
1562[info] FlowOpsTakeLastTest:
1563[info] takeLast
1564[info] - should throw ChannelClosedException.Error for a source that failed without an exception
1565[info] - should fail to takeLast when n < 0
1566[info] - should return empty list for the empty source
1567[info] - should return empty list when n == 0 and list is not empty
1568[info] - should return list with all elements if the source is smaller than requested number
1569[info] - should return the last n elements from the source
1570[info] FlowOpsZipAllTest:
1571[info] zipAll
1572[info] - should not emit any element when both flows are empty
1573[info] - should emit this element when other flow is empty
1574[info] - should emit other element when this flow is empty
1575[info] - should emit matching elements when both flows are of the same size
1576[info] - should emit default for other flow if this flow is longer
1577[info] - should emit default for this flow if other flow is longer
1578[info] FlowPublisherTckTest:
1579[info] - required_createPublisher1MustProduceAStreamOfExactly1Element
1580[info] - required_createPublisher3MustProduceAStreamOfExactly3Elements
1581[info] - required_validate_maxElementsFromPublisher
1582[info] - required_validate_boundedDepthOfOnNextAndRequestRecursion
1583[info] - required_spec101_subscriptionRequestMustResultInTheCorrectNumberOfProducedElements
1584[info] - required_spec102_maySignalLessThanRequestedAndTerminateSubscription
1585[info] - stochastic_spec103_mustSignalOnMethodsSequentially
1586[info] - optional_spec104_mustSignalOnErrorWhenFails
1587[info] - required_spec105_mustSignalOnCompleteWhenFiniteStreamTerminates
1588[info] - optional_spec105_emptyStreamMustTerminateBySignallingOnComplete
1589[info] - required_spec107_mustNotEmitFurtherSignalsOnceOnCompleteHasBeenSignalled
1590[info] - untested_spec107_mustNotEmitFurtherSignalsOnceOnErrorHasBeenSignalled !!! IGNORED !!!
1591[info] - untested_spec109_subscribeShouldNotThrowNonFatalThrowable !!! IGNORED !!!
1592[info] - required_spec109_subscribeThrowNPEOnNullSubscriber
1593[info] - required_spec109_mustIssueOnSubscribeForNonNullSubscriber
1594[info] - required_spec109_mayRejectCallsToSubscribeIfPublisherIsUnableOrUnwillingToServeThemRejectionMustTriggerOnErrorAfterOnSubscribe
1595[info] - untested_spec110_rejectASubscriptionRequestIfTheSameSubscriberSubscribesTwice !!! IGNORED !!!
1596[info] - optional_spec111_maySupportMultiSubscribe
1597[info] - optional_spec111_registeredSubscribersMustReceiveOnNextOrOnCompleteSignals
1598[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingOneByOne
1599[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingManyUpfront
1600[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingManyUpfrontAndCompleteAsExpected
1601[info] - required_spec302_mustAllowSynchronousRequestCallsFromOnNextAndOnSubscribe
1602[info] - required_spec303_mustNotAllowUnboundedRecursion
1603[info] - untested_spec304_requestShouldNotPerformHeavyComputations !!! IGNORED !!!
1604[info] - untested_spec305_cancelMustNotSynchronouslyPerformHeavyComputation !!! IGNORED !!!
1605[info] - required_spec306_afterSubscriptionIsCancelledRequestMustBeNops
1606[info] - required_spec307_afterSubscriptionIsCancelledAdditionalCancelationsMustBeNops
1607[info] - required_spec309_requestZeroMustSignalIllegalArgumentException
1608[info] - required_spec309_requestNegativeNumberMustSignalIllegalArgumentException
1609[info] - required_spec312_cancelMustMakeThePublisherToEventuallyStopSignaling
1610[info] - required_spec313_cancelMustMakeThePublisherEventuallyDropAllReferencesToTheSubscriber
1611[info] - required_spec317_mustSupportAPendingElementCountUpToLongMaxValue
1612[info] - required_spec317_mustSupportACumulativePendingElementCountUpToLongMaxValue
1613[info] - required_spec317_mustNotSignalOnErrorWhenPendingAboveLongMaxValue
1614[info] - optional_spec309_requestNegativeNumberMaySignalIllegalArgumentExceptionWithSpecificMessage
1615[info] - untested_spec108_possiblyCanceledSubscriptionShouldNotReceiveOnErrorOrOnCompleteSignals !!! IGNORED !!!
1616[info] - untested_spec106_mustConsiderSubscriptionCancelledAfterOnErrorOrOnCompleteHasBeenCalled !!! IGNORED !!!
1617[info] SourceOpsFactoryMethodsTest:
1618[info] Source factory methods
1619[info] - should create a source from a fork
1620[info] CancelTest:
1621[info] cancel
1622[2026-01-08T00:30:26.552984038Z] [2410] started
1623[2026-01-08T00:30:26.653395570Z] [2410] interrupted
1624[2026-01-08T00:30:27.153878745Z] [2410] interrupted done
1625[2026-01-08T00:30:27.154214638Z] [2408] cancel done
1626[info] - should block until the fork completes
1627[2026-01-08T00:30:28.157367548Z] [2411] cancel done
1628[2026-01-08T00:30:28.259368821Z] [2416] interrupted
1629[2026-01-08T00:30:28.359764415Z] [2416] interrupted done
1630[2026-01-08T00:30:28.360038173Z] [2414] cancel done
1631[2026-01-08T00:30:28.460707394Z] [2417] cancel done
1632[2026-01-08T00:30:28.562656427Z] [2422] interrupted
1633[2026-01-08T00:30:28.663003650Z] [2422] interrupted done
1634[2026-01-08T00:30:28.663216Z] [2420] cancel done
1635[2026-01-08T00:30:28.763968178Z] [2423] cancel done
1636[2026-01-08T00:30:28.865897587Z] [2428] interrupted
1637[2026-01-08T00:30:28.966276489Z] [2428] interrupted done
1638[2026-01-08T00:30:28.966570889Z] [2426] cancel done
1639[2026-01-08T00:30:29.067290895Z] [2429] cancel done
1640[2026-01-08T00:30:29.169236375Z] [2434] interrupted
1641[2026-01-08T00:30:29.269681922Z] [2434] interrupted done
1642[2026-01-08T00:30:29.269942850Z] [2432] cancel done
1643[2026-01-08T00:30:29.370585021Z] [2435] cancel done
1644[2026-01-08T00:30:29.472501784Z] [2440] interrupted
1645[2026-01-08T00:30:29.572887821Z] [2440] interrupted done
1646[2026-01-08T00:30:29.573130324Z] [2438] cancel done
1647[2026-01-08T00:30:29.673791982Z] [2441] cancel done
1648[2026-01-08T00:30:29.775713118Z] [2446] interrupted
1649[2026-01-08T00:30:29.876036505Z] [2446] interrupted done
1650[2026-01-08T00:30:29.876234494Z] [2444] cancel done
1651[2026-01-08T00:30:29.976748115Z] [2447] cancel done
1652[2026-01-08T00:30:30.078596806Z] [2452] interrupted
1653[2026-01-08T00:30:30.178978585Z] [2452] interrupted done
1654[2026-01-08T00:30:30.179207058Z] [2450] cancel done
1655[2026-01-08T00:30:30.279851655Z] [2453] cancel done
1656[2026-01-08T00:30:30.381862085Z] [2458] interrupted
1657[2026-01-08T00:30:30.482245175Z] [2458] interrupted done
1658[2026-01-08T00:30:30.482539635Z] [2456] cancel done
1659[2026-01-08T00:30:30.583180245Z] [2459] cancel done
1660[2026-01-08T00:30:30.685187547Z] [2464] interrupted
1661[2026-01-08T00:30:30.785645507Z] [2464] interrupted done
1662[2026-01-08T00:30:30.785912506Z] [2462] cancel done
1663[2026-01-08T00:30:30.886591064Z] [2465] cancel done
1664[2026-01-08T00:30:30.988376598Z] [2470] interrupted
1665[2026-01-08T00:30:31.088755140Z] [2470] interrupted done
1666[2026-01-08T00:30:31.089020111Z] [2468] cancel done
1667[info] - should block until the fork completes (stress test)
1668[info] + iteration 1
1669[info] + iteration 2
1670[info] + iteration 3
1671[info] + iteration 4
1672[info] + iteration 5
1673[info] + iteration 6
1674[info] + iteration 7
1675[info] + iteration 8
1676[info] + iteration 9
1677[info] + iteration 10
1678[info] + iteration 11
1679[info] + iteration 12
1680[info] + iteration 13
1681[info] + iteration 14
1682[info] + iteration 15
1683[info] + iteration 16
1684[info] + iteration 17
1685[info] + iteration 18
1686[info] + iteration 19
1687[info] + iteration 20
1688[info] cancelNow
1689[2026-01-08T00:30:31.292488348Z] [2471] cancel done
1690[2026-01-08T00:30:31.792858434Z] [2473] interrupted done
1691[info] - should return immediately, and wait for forks when scope completes
1692[info] - should (when followed by a joinEither) catch InterruptedException with which a fork ends
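CancelTest above exercises forkCancellable: cancel() interrupts the fork and blocks until it completes, and is assumed (per the ox docs) to return the fork's result as an Either. A sketch:

import ox.*
import scala.concurrent.duration.*

@main def cancelDemo(): Unit =
  supervised {
    val f = forkCancellable { sleep(1.second); "finished" }
    sleep(100.millis)
    val r = f.cancel()    // interrupts, then blocks until the fork completes
    println(r)            // Left(java.lang.InterruptedException) here
  }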
1693[info] FlowOpsTapTest:
1694[info] - should tap over a flow
1695[info] FlowOpsAlsoToTapTest:
1696[info] alsoToTap
1697[info] - should send to both sinks when other is faster
1698[info] - should send to both sinks when other is slower
1699[info] - should not fail the flow when the other sink fails
1700[info] - should not close the flow when the other sink closes
1701[info] SourceOpsFailedTest:
1702[info] Source.failed
1703[info] - should fail on receive
1704[info] - should be in error
1705[info] FlowOpsDebounceTest:
1706[info] debounce
1707[info] - should not debounce if applied on an empty flow
1708[info] - should not debounce if applied on a flow containing only distinct values
1709[info] - should debounce if applied on a flow containing only repeating values
1710[info] - should debounce if applied on a flow containing repeating elements
1711[info] FlowOpsThrottleTest:
1712[info] throttle
1713[info] - should not throttle the empty source
1714[info] - should throttle to specified elements per time units
1715[info] - should fail to throttle when elements <= 0
1716[info] - should fail to throttle when per lower than 1ms
1717[info] FlowOpsRunToChannelTest:
1718[info] runToChannel
1719[info] - should receive the elements in the flow
1720[info] - should return the original source when running a source-backed flow
1721[info] FlowOpsTimeoutTest:
1722[info] - should timeout
1723[info] FlowOpsZipTest:
1724[info] - should zip two sources
1725[info] FixedRateRepeatTest:
1726[info] repeat
1727[info] - should repeat a function at fixed rate
1728[info] - should repeat a function at fixed rate with initial delay
1729[info] - should repeat a function forever at fixed rate
1730[info] - should repeat a function forever at fixed rate with initial delay
1731[info] ForeachParTest:
1732[info] foreachPar
1733[2026-01-08T00:30:33.931518176Z] [2505] 3
1734[2026-01-08T00:30:33.931510667Z] [2502] 0
1735[2026-01-08T00:30:33.931510628Z] [2506] 4
1736[2026-01-08T00:30:33.931510689Z] [2504] 2
1737[2026-01-08T00:30:33.931632293Z] [2503] 1
1738[2026-01-08T00:30:34.032198535Z] [2509] 7
1739[2026-01-08T00:30:34.032186154Z] [2508] 6
1740[2026-01-08T00:30:34.032251804Z] [2511] 9
1741[2026-01-08T00:30:34.032245219Z] [2510] 8
1742[2026-01-08T00:30:34.032397573Z] [2507] 5
1743[2026-01-08T00:30:34.132698799Z] [2512] 10
1744[2026-01-08T00:30:34.132747110Z] [2513] 11
1745[2026-01-08T00:30:34.132781997Z] [2514] 12
1746[2026-01-08T00:30:34.132835237Z] [2515] 13
1747[2026-01-08T00:30:34.132833831Z] [2516] 14
1748[2026-01-08T00:30:34.233248470Z] [2517] 15
1749[2026-01-08T00:30:34.233419126Z] [2518] 16
1750[2026-01-08T00:30:34.233438837Z] [2519] 17
1751[info] - should run computations in parallel
1752[info] - should run not more computations than limit
1753[2026-01-08T00:30:34.568792198Z] [2684] exception
1754[2026-01-08T00:30:34.569425652Z] [24] catch
1755[2026-01-08T00:30:34.869628564Z] [24] all done
1756[info] - should interrupt other computations if one fails
1757[info] FlowOpsFutureSourceTest:
1758[info] futureSource
1759[info] - should return the original future failure when future fails
1760[info] - should return future's source values
1761[info] SourceOpsTransformTest:
1762[info] Source.transform
1763[info] - should transform a source using a simple map
1764[info] - should transform a source using a complex chain of operations
1765[info] - should transform an infinite source
1766[info] - should transform an infinite source (stress test)
1767[info] RateLimiterTest:
1768[info] fixed rate RateLimiter
1769[info] - should drop operation when rate limit is exceeded
1770[info] - should restart rate limiter after given duration
1771[info] - should block operation when rate limit is exceeded
1772[info] - should respect time constraints when blocking
1773[info] - should respect time constraints when blocking concurrently
1774[info] - should allow running more long-running operations concurrently than the max rate when not considering operation time
1775[info] - should not allow running more long-running operations concurrently than the max rate when considering operation time
1776[info] sliding window RateLimiter
1777[info] - should drop operation when rate limit is exceeded
1778[info] - should restart rate limiter after given duration
1779[info] - should block operation when rate limit is exceeded
1780[info] - should respect time constraints when blocking
1781[info] - should respect time constraints when blocking concurrently
1782[info] - should not allow running more operations when operations are still running, when considering operation time
1783[info] - should not allow running more operations when operations are still running in the window span, when considering operation time
1784[info] bucket RateLimiter
1785[info] - should drop operation when rate limit is exceeded
1786[info] - should refill token after time elapsed from last refill and not before
1787[info] - should block operation when rate limit is exceeded
1788[info] - should respect time constraints when blocking
1789[info] - should respect time constraints when blocking concurrently
1790[info] FlowOpsSplitOnTest:
1791[info] splitOn
1792[info] - should split an empty flow
1793[info] - should split a flow with no delimiters
1794[info] - should split a flow with single-element delimiter at the beginning
1795[info] - should split a flow with single-element delimiter at the end
1796[info] - should split a flow with single-element delimiter in the middle
1797[info] - should split a flow with multiple single-element delimiters
1798[info] - should split a flow with adjacent single-element delimiters
1799[info] - should split a flow with only single-element delimiters
1800[info] - should split a flow with multi-element delimiter at the beginning
1801[info] - should split a flow with multi-element delimiter at the end
1802[info] - should split a flow with multi-element delimiter in the middle
1803[info] - should split a flow with multiple multi-element delimiters
1804[info] - should split a flow with adjacent multi-element delimiters
1805[info] - should split a flow with only multi-element delimiters
1806[info] - should split a flow with overlapping patterns
1807[info] - should split a flow with complex overlapping patterns
1808[info] - should handle empty delimiter by returning entire input as single chunk
1809[info] - should handle empty delimiter with empty input
1810[info] - should split a flow with string elements
1811[info] - should split a flow with multi-element string delimiter
1812[info] - should handle delimiter longer than input
1813[info] - should handle single element matching start of multi-element delimiter
1814[info] - should handle partial delimiter match at end
1815[info] - should split with delimiter that appears multiple times in sequence
1816[info] - should handle error propagation
1817[info] - should split a large flow efficiently
1818[info] - should handle repeated delimiter pattern correctly
1819[info] - should properly split when given a flow with delimiter patterns
1820[info] - should handle erroneous scenarios when delimiter processing fails
1821[info] FlowOpsMergeTest:
1822[info] merge
1823[info] - should merge two simple flows
1824[info] - should merge two async flows
1825[info] - should merge with a tick flow
1826[info] - should propagate error from the left
1827[info] - should propagate error from the right
1828[info] - should merge two flows, emitting all elements from the left when right completes
1829[info] - should merge two flows, emitting all elements from the right when left completes
1830[info] - should merge two flows, completing the resulting flow when the left flow completes
1831[info] - should merge two flows, completing the resulting flow when the right flow completes
1832[info] FlowOpsFlatMapTest:
1833[info] flatMap
1834[info] - should flatten simple flows
1835[info] - should propagate errors
1836[info] FlowCompanionIOOpsTest:
1837[info] fromInputStream
1838[info] - should handle an empty InputStream
1839[info] - should handle InputStream shorter than buffer size
1840[info] - should handle InputStream longer than buffer size
1841[info] - should close the InputStream after reading it
1842[info] - should close the InputStream after failing with an exception
1843[info] fromFile
1844[info] - should read content from a file smaller than chunk size
1845[info] - should read content from a file larger than chunk size
1846[info] - should handle an empty file
1847[info] - should throw an exception for missing file
1848[info] - should throw an exception if path is a directory
1849[info] CollectParTest:
1850[info] collectPar
1851[info] - should output the same type as input
1852[info] - should run computations in parallel
1853[info] - should run not more computations than limit
1854[2026-01-08T00:31:19.246663708Z] [5995] exception
1855[2026-01-08T00:31:19.247782143Z] [24] catch
1856[2026-01-08T00:31:19.548013423Z] [24] all done
1857[info] - should interrupt other computations if one fails
1858[info] FlowOpsFutureTest:
1859[info] future
1860[info] - should return the original future failure when future fails
1861[info] - should return future value
1862[info] FlowOpsInterleaveTest:
1863[info] interleave
1864[info] - should interleave with an empty source
1865[info] - should interleave two sources with default segment size
1866[info] - should interleave two sources with default segment size and different lengths
1867[info] - should interleave two sources with custom segment size
1868[info] - should interleave two sources with custom segment size and different lengths
1869[info] - should interleave two sources with different lengths and complete eagerly
1870[info] - should, when empty, interleave with a non-empty source and complete eagerly
1871[info] - should interleave with an empty source and complete eagerly
1872[info] ParTest:
1873[info] par
1874[2026-01-08T00:31:19.671103846Z] [6025] b
1875[2026-01-08T00:31:19.771101645Z] [6024] a
1876[2026-01-08T00:31:19.771473070Z] [24] done
1877[info] - should run computations in parallel
1878[2026-01-08T00:31:19.873335979Z] [6028] exception
1879[2026-01-08T00:31:19.873873199Z] [24] catch
1880[2026-01-08T00:31:20.174107435Z] [24] all done
1881[info] - should interrupt other computations if one fails
1882[info] parLimit
1883[info] - should run up to the given number of computations in parallel
1884[2026-01-08T00:31:20.880917945Z] [6040] x
1885[2026-01-08T00:31:20.880926096Z] [6041] x
1886[2026-01-08T00:31:20.891294706Z] [6043] exception
1887[2026-01-08T00:31:20.891695244Z] [24] catch
1888[2026-01-08T00:31:21.191878755Z] [24] all done
1889[info] - should interrupt other computations if one fails
1890[info] parEither
1891[2026-01-08T00:31:21.293813458Z] [6047] b
1892[2026-01-08T00:31:21.393819625Z] [6046] a
1893[2026-01-08T00:31:21.394677270Z] [24] done
1894[info] - should run computations in parallel
1895[2026-01-08T00:31:21.496075075Z] [6050] exception
1896[2026-01-08T00:31:21.796828301Z] [24] all done
1897[info] - should interrupt other computations if one fails
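ParTest above exercises par: computations run in parallel, results come back as a tuple, and a failure in one interrupts the others. A minimal sketch:

import ox.{par, sleep}
import scala.concurrent.duration.*

@main def parDemo(): Unit =
  val (a, b) = par({ sleep(200.millis); "a" }, { sleep(100.millis); "b" })
  println(s"$a $b")       // completes after ~200ms, not 300ms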
1898[info] SelectWithinTest:
1899[info] selectWithin
1900[info] - should select a clause that can complete immediately
1901[info] - should throw TimeoutException when no clause can complete within the timeout
1902[info] - should select a source that has a value immediately
1903[info] - should throw TimeoutException when no source has a value within the timeout
1904[info] - should work with single clause
1905[info] - should work with three clauses
1906[info] - should work with four clauses
1907[info] - should work with five clauses
1908[info] - should work with sequence of clauses
1909[info] selectWithin with sources
1910[info] - should work with single source
1911[info] - should work with two sources
1912[info] - should work with three sources
1913[info] - should work with four sources
1914[info] - should work with five sources
1915[info] - should work with sequence of sources
1916[info] selectWithin timeout scenarios
1917[info] - should throw TimeoutException for single clause timeout
1918[info] - should throw TimeoutException for single source timeout
1919[info] - should throw TimeoutException for sequence of clauses timeout
1920[info] - should throw TimeoutException for sequence of sources timeout
1921[info] - should throw TimeoutException immediately for empty sequence of clauses
1922[info] - should throw TimeoutException immediately for empty sequence of sources
1923[info] selectWithin error scenarios
1924[info] - should throw ChannelClosedException when channel is closed with done
1925[info] - should throw ChannelClosedException when channel is closed with error
1926[info] - should prioritize ready channels over closed ones
1927[info] selectWithin performance
1928[info] - should not timeout when clause can complete immediately
1929[info] - should respect timeout duration
1930[info] selectWithin with send clauses
1931[info] - should work with send clauses
1932[info] - should throw TimeoutException when send clauses cannot complete
1933[info] ImmediateRepeatTest:
1934[info] repeat
1935[info] - should repeat a function immediately
1936[info] - should repeat a function immediately with initial delay
1937[info] - should repeat a function immediately forever
1938[info] - should repeat a function immediately forever with initial delay
1939[info] FlowOpsDebounceByTest:
1940[info] debounceBy
1941[info] - should not debounce if applied on an empty flow
1942[info] - should not debounce if applied on a flow containing only distinct f(value)
1943[info] - should debounce if applied on a flow containing repeating f(value)
1944[info] - should debounce subsequent odd/prime numbers
1945[info] RaceTest:
1946[info] timeout
1947[2026-01-08T00:31:23.177857385Z] [24] timeout
1948[2026-01-08T00:31:23.178017262Z] [24] done
1949[info] - should short-circuit a long computation
1950[2026-01-08T00:31:25.679694176Z] [6112] no timeout
1951[2026-01-08T00:31:25.680104373Z] [24] done
1952[info] - should not interrupt a short computation
1953[info] timeoutOption
1954[2026-01-08T00:31:28.682199069Z] [24] done: None
1955[info] - should short-circuit a long computation
1956[info] race
1957[2026-01-08T00:31:31.184089854Z] [6117] fast
1958[info] - should race a slower and faster computation
1959[2026-01-08T00:31:32.686672987Z] [6118] fast
1960[info] - should race a faster and slower computation
1961[2026-01-08T00:31:33.888835863Z] [6120] error
1962[2026-01-08T00:31:34.188821311Z] [6121] slow
1963[info] - should return the first successful computation to complete
1964[info] - should add other exceptions as suppressed
1965[info] - should treat ControlThrowable as a non-fatal exception
1966[info] - should immediately rethrow other fatal exceptions
1967[info] raceEither
1968[2026-01-08T00:31:35.795813941Z] [6131] error
1969[2026-01-08T00:31:36.095823168Z] [6132] slow
1970[info] - should return the first successful computation to complete
1971[info] raceResult
1972[info] - should immediately return when a normal exception occurs
1973[info] - should immediately return when a control exception occurs
1974[info] - should immediately return when a fatal exception occurs
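RaceTest above exercises racing and timeouts: race returns the first successful result (losers are interrupted), and timeoutOption yields None when the deadline passes. A minimal sketch:

import ox.{race, sleep, timeoutOption}
import scala.concurrent.duration.*

@main def raceDemo(): Unit =
  val winner = race({ sleep(1.second); "slow" }, { sleep(100.millis); "fast" })
  println(winner)                                        // fast
  println(timeoutOption(200.millis) { sleep(1.second) }) // None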
1975[info] SourceOpsFutureSourceTest:
1976[info] SourceOps.futureSource
1977[info] - should return the original future failure when future fails
1978[info] - should return the original future failure when future fails with ExecutionException
1979[info] - should return future's source values
1980[info] FilterParTest:
1981[info] filterPar
1982[info] - should output the same type as input
1983[info] - should run computations in parallel
1984[info] - should run not more computations than limit
1985[2026-01-08T00:31:37.848735073Z] [6337] exception
1986[2026-01-08T00:31:37.849225717Z] [24] catch
1987[2026-01-08T00:31:38.149428014Z] [24] all done
1988[info] - should interrupt other computations if one fails
1989[info] SourceOpsForeachTest:
1990[info] Source.foreach
1991[info] - should iterate over a source
1992[info] - should iterate over a source using for-syntax
1993[info] - should convert source to a list
1994[info] FlowOpsEmptyTest:
1995[info] empty
1996[info] - should be empty
1997[info] SupervisedTest:
1998[info] supervised
1999[2026-01-08T00:31:38.259489413Z] [6343] b
2000[2026-01-08T00:31:38.359324726Z] [6342] a
2001[2026-01-08T00:31:38.359666155Z] [24] done
2002[info] - should wait until all forks complete
2003[2026-01-08T00:31:38.461235366Z] [6346] b
2004[2026-01-08T00:31:38.461619973Z] [24] done
2005[info] - should only wait until user forks complete
2006[2026-01-08T00:31:38.563418128Z] [6350] b
2007[2026-01-08T00:31:38.663984575Z] [24] done
2008[info] - should interrupt once any fork ends with an exception
2009[2026-01-08T00:31:38.866130271Z] [24] done
2010[info] - should interrupt main body once a fork ends with an exception
2011[2026-01-08T00:31:38.967867515Z] [6356] b
2012[2026-01-08T00:31:39.167616463Z] [6354] a
2013[2026-01-08T00:31:39.167852686Z] [24] done
2014[info] - should not interrupt if an unsupervised fork ends with an exception
2015[info] - should handle interruption of multiple forks with `joinEither` correctly
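SupervisedTest covers the core structured-concurrency scope: supervised waits for the body and all user forks, and interrupts everything once any supervised fork fails. A small sketch, assuming the documented supervised/fork API:
----
import ox.*
import scala.concurrent.duration.*

val sum = supervised {
  val a = fork { sleep(100.millis); 1 }
  val b = fork { 2 }
  a.join() + b.join()  // a failure in either fork would interrupt the whole scope
}
// forkUser { ... } would additionally keep the scope open until the fork completes
----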
2016[info] SourceOpsFutureTest:
2017[info] Source.future
2018[info] - should return the original future failure when future fails
2019[info] - should return the original future failure when future fails with ExecutionException
2020[info] - should return future value
2021[info] CircuitBreakerStateMachineTest:
2022[info] Circuit Breaker state machine
2023[info] - should keep closed with healthy metrics
2024[info] - should go to open after surpassing failure threshold
2025[info] - should go straight to half open after surpassing failure threshold with defined waitDurationOpenState = 0
2026[info] - should go back to open after timeout in half open passed
2027[info] - should update counter of completed operations in halfOpen state
2028[info] - should go back to closed after enough calls with good metrics are recorded
2029[info] - should go to open after enough calls with bad metrics are recorded in halfOpen state
2030[info] - should go to closed after enough calls with good metrics are recorded in halfOpen state
2031[info] - should go to half open after waitDurationOpenState passes
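The state-machine tests above walk the circuit breaker through its closed, open and half-open states, driven by failure metrics and the waitDurationOpenState setting. A hypothetical usage sketch: the CircuitBreaker/CircuitBreakerConfig/runOrDrop names are assumed from the ox resilience docs and may differ in this version:
----
import ox.*
import ox.resilience.*
import scala.concurrent.duration.*

def remoteCall(): String = "ok"  // placeholder operation

supervised {
  // names assumed: CircuitBreaker, CircuitBreakerConfig.default, runOrDrop
  val cb = CircuitBreaker(CircuitBreakerConfig.default.copy(waitDurationOpenState = 1.second))
  val result: Option[String] = cb.runOrDrop(remoteCall())  // None while the breaker is open
}
----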
2032[info] OxAppTest:
2033[info] OxApp
2034[info] - should work in happy case
2035[info] OxApp
2036Clean shutdown timed out after 100 milliseconds, exiting.
2037[info] - should shutdown despite cleanup taking a long time
2038[info] OxApp
2039[info] - should work in interrupted case
2040[info] OxApp
2041[info] - should work in failed case
2042[info] OxApp
2043[info] - should report any non-interrupted exceptions that occur during shutdown
2044[info] OxApp.Simple
2045[info] - should work in happy case
2046[info] OxApp.Simple
2047[info] - should work in interrupted case
2048[info] OxApp.Simple
2049[info] - should work in failed case
2050[info] OxApp.WithErrors
2051[info] - should work in happy case
2052[info] OxApp.WithErrors
2053[info] - should work in interrupted case
2054[info] OxApp.WithErrors
2055[info] - should work in failed case
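OxApp, tested above in its plain, Simple and WithErrors variants, runs the application body inside a supervised scope and installs a shutdown hook, which is where the "Clean shutdown timed out" message above comes from. A minimal sketch, assuming the documented run signature:
----
import ox.*

object Main extends OxApp:
  def run(args: Vector[String])(using Ox): ExitCode =
    fork { /* background work; interrupted on shutdown */ }
    ExitCode.Success
----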
2056[info] FlowOpsMapStatefulConcatTest:
2057[info] mapStatefulConcat
2058[info] - should deduplicate
2059[info] - should count consecutive
2060[info] - should propagate errors in the mapping function
2061[info] - should propagate errors in the completion callback
2062[info] FlowOpsDropTest:
2063[info] drop
2064[info] - should not drop from the empty flow
2065[info] - should drop elements from the source
2066[info] - should return an empty source when more elements than the source length were dropped
2067[info] - should not drop when 'n == 0'
2068[info] FlowOpsRepeatEvalTest:
2069[info] repeatEval
2070[info] - should evaluate the element before each send
2071[info] - should evaluate the element before each send, as long as it's defined
2072[info] FlowPublisherPekkoTest:
2073[INFO] [01/08/2026 01:31:39.967] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2074[info] - a simple flow should emit elements to be processed by a pekko stream
2075[INFO] [01/08/2026 01:31:40.223] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2076[info] - a concurrent flow should emit elements to be processed by a pekko stream
2077[INFO] [01/08/2026 01:31:40.260] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2078[info] - create a flow from a simple publisher
2079[INFO] [01/08/2026 01:31:40.543] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2080[info] - create a flow from a concurrent publisher
2081[info] ImmediateRetryTest:
2082[info] Immediate retry
2083[info] - should retry a succeeding function
2084[info] - should fail fast when a function is not worth retrying
2085[info] - should retry a succeeding function with a custom success condition
2086[info] - should retry a failing function
2087[info] - should retry a failing function forever
2088[info] - should retry a succeeding Either
2089[info] - should fail fast when an Either is not worth retrying
2090[info] - should retry a succeeding Either with a custom success condition
2091[info] - should retry a failing Either
2092[info] Adaptive retry with immediate config
2093[info] - should retry a failing adaptive
2094[info] - should stop retrying after emptying bucket
2095[info] - should not pay exceptionCost if result T is going to be retried and shouldPayPenaltyCost returns false
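ImmediateRetryTest covers retries without backoff, for plain results, Eithers, and the adaptive variant whose token bucket and exceptionCost appear in the test names. A sketch of the plain case; RetryConfig.immediate is assumed from the ox resilience docs and may be named differently in this version:
----
import ox.resilience.*

def flaky(): Int =
  if math.random() < 0.5 then throw RuntimeException("boom") else 42

// retry up to 3 times with no delay, rethrowing the last failure if all attempts fail
val n = retry(RetryConfig.immediate(3))(flaky())
----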
2096[info] FlowOpsGroupByTest:
2097[info] groupBy
2098[info] - should handle empty flow
2099[info] - should handle single-element flow
2100[info] - should handle single-element flow (stress test)
2101[info] - should create simple groups without reaching parallelism limit
2102[info] - should complete groups when the parallelism limit is reached
2103[info] - should not exceed the parallelism limit, completing earliest-active child flows as done when necessary
2104[info] - should handle large flows
2105[info] - should handle non-integer grouping keys
2106[info] - should group when child processing is slow
2107[info] - should propagate errors from child flows
2108[info] - should propagate errors from child flows when the parent is blocked on sending
2109[info] - should propagate RuntimeException errors from parent flows
2110[info] - should throw an IllegalStateException when a child stream is completed by user-provided transformation
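groupBy, exercised above, demultiplexes a flow into per-key child flows, completing the earliest-active children when the parallelism limit is hit. A sketch assuming the groupBy(parallelism, key)(childTransform) shape from the ox docs; the exact signature is an assumption:
----
import ox.flow.Flow

val grouped = Flow
  .fromValues(10, 11, 12, 13)
  .groupBy(2, _ % 2)(key => child => child.map(v => s"group $key: $v"))
  .runToList()
----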
2111[info] FlowOpsLastOptionTest:
2112[info] lastOption
2113[info] - should return None for the empty flow
2114[info] - should return Some for a non-empty flow
2115[info] - should throw ChannelClosedException.Error with exception and message that was thrown during retrieval
2116[info] FlowOpsMapParUnorderedTest:
2117[info] mapParUnordered
2118[info] - should map over a source with parallelism limit 1
2119[info] - should map over a source with parallelism limit 2
2120[info] - should map over a source with parallelism limit 3
2121[info] - should map over a source with parallelism limit 4
2122[info] - should map over a source with parallelism limit 5
2123[info] - should map over a source with parallelism limit 6
2124[info] - should map over a source with parallelism limit 7
2125[info] - should map over a source with parallelism limit 8
2126[info] - should map over a source with parallelism limit 9
2127[info] - should map over a source with parallelism limit 10
2128[info] - should map over a source with parallelism limit 10 (stress test)
2129[info] + iteration 1
2130[info] + iteration 2
2131[info] + iteration 3
2132[info] + iteration 4
2133[info] + iteration 5
2134[info] + iteration 6
2135[info] + iteration 7
2136[info] + iteration 8
2137[info] + iteration 9
2138[info] + iteration 10
2139[info] + iteration 11
2140[info] + iteration 12
2141[info] + iteration 13
2142[info] + iteration 14
2143[info] + iteration 15
2144[info] + iteration 16
2145[info] + iteration 17
2146[info] + iteration 18
2147[info] + iteration 19
2148[info] + iteration 20
2149[info] + iteration 21
2150[info] + iteration 22
2151[info] + iteration 23
2152[info] + iteration 24
2153[info] + iteration 25
2154[info] + iteration 26
2155[info] + iteration 27
2156[info] + iteration 28
2157[info] + iteration 29
2158[info] + iteration 30
2159[info] + iteration 31
2160[info] + iteration 32
2161[info] + iteration 33
2162[info] + iteration 34
2163[info] + iteration 35
2164[info] + iteration 36
2165[info] + iteration 37
2166[info] + iteration 38
2167[info] + iteration 39
2168[info] + iteration 40
2169[info] + iteration 41
2170[info] + iteration 42
2171[info] + iteration 43
2172[info] + iteration 44
2173[info] + iteration 45
2174[info] + iteration 46
2175[info] + iteration 47
2176[info] + iteration 48
2177[info] + iteration 49
2178[info] + iteration 50
2179[info] + iteration 51
2180[info] + iteration 52
2181[info] + iteration 53
2182[info] + iteration 54
2183[info] + iteration 55
2184[info] + iteration 56
2185[info] + iteration 57
2186[info] + iteration 58
2187[info] + iteration 59
2188[info] + iteration 60
2189[info] + iteration 61
2190[info] + iteration 62
2191[info] + iteration 63
2192[info] + iteration 64
2193[info] + iteration 65
2194[info] + iteration 66
2195[info] + iteration 67
2196[info] + iteration 68
2197[info] + iteration 69
2198[info] + iteration 70
2199[info] + iteration 71
2200[info] + iteration 72
2201[info] + iteration 73
2202[info] + iteration 74
2203[info] + iteration 75
2204[info] + iteration 76
2205[info] + iteration 77
2206[info] + iteration 78
2207[info] + iteration 79
2208[info] + iteration 80
2209[info] + iteration 81
2210[info] + iteration 82
2211[info] + iteration 83
2212[info] + iteration 84
2213[info] + iteration 85
2214[info] + iteration 86
2215[info] + iteration 87
2216[info] + iteration 88
2217[info] + iteration 89
2218[info] + iteration 90
2219[info] + iteration 91
2220[info] + iteration 92
2221[info] + iteration 93
2222[info] + iteration 94
2223[info] + iteration 95
2224[info] + iteration 96
2225[info] + iteration 97
2226[info] + iteration 98
2227[info] + iteration 99
2228[info] + iteration 100
2229[info] - should propagate errors
2230[2026-01-08T00:31:56.387134611Z] [208018] done
2231[2026-01-08T00:31:56.387134578Z] [208017] done
2232[2026-01-08T00:31:56.487774211Z] [208020] exception
2233[info] - should complete running forks and not start new ones when the mapping function fails
2234[2026-01-08T00:31:56.791400872Z] [208025] 2
2235[2026-01-08T00:31:56.791410426Z] [208024] 1
2236[info] - should complete running forks and not start new ones when the upstream fails
2237[2026-01-08T00:31:57.205160282Z] [208032] done
2238[2026-01-08T00:31:57.205153411Z] [208031] done
2239[2026-01-08T00:31:57.305723881Z] [208034] exception
2240[info] - should cancel running forks when the surrounding scope closes due to an error
2241[info] - should emit downstream as soon as a value is ready, regardless of the incoming order
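mapParUnordered, per the tests above, maps up to `parallelism` elements concurrently and emits each result as soon as it is ready, without preserving the input order; a failure completes the running forks and starts no new ones. A small sketch, assuming the Flow API:
----
import ox.flow.Flow
import ox.sleep
import scala.concurrent.duration.*

val out = Flow
  .fromValues(3, 2, 1)
  .mapParUnordered(3)(n => { sleep((n * 100).millis); n })
  .runToList()
// likely List(1, 2, 3): the fastest computations finish first
----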
2242[info] ControlTest:
2243[info] timeout
2244[2026-01-08T00:31:58.813730581Z] [24] timeout
2245[2026-01-08T00:31:58.813872696Z] [24] done
2246[info] - should short-circuit a long computation
2247[info] - should pass through the exception of failed computation
2248[2026-01-08T00:32:00.916207783Z] [208048] no timeout
2249[2026-01-08T00:32:00.916560805Z] [24] done
2250[info] - should not interrupt a short computation
2251[2026-01-08T00:32:03.318134221Z] [208050] done
2252[info] - should block a thread indefinitely
2253[info] timeoutOption
2254[info] - should pass through the exception of failed computation
2255[info] timeoutEither
2256[info] - should pass through the exception of failed computation
2257[info] FlowOpsConcatTest:
2258[info] - should concatenate flows
2259[info] - should concatenate flows using ++
2260[info] - should not evaluate subsequent flows if there's a failure
2261[info] FlowOpsFactoryMethodsTest:
2262[info] factory methods
2263[info] - should create a flow from a fork
2264[info] - should create an iterating flow
2265[info] - should unfold a function
2266[info] - should produce a range
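The factory-method tests above correspond to Flow's constructors. A sketch of the ones named in the output (the fork-based factory omitted), assuming the documented API:
----
import ox.flow.Flow

Flow.fromValues(1, 2, 3).runToList()                           // List(1, 2, 3)
Flow.iterate(1)(_ + 1).take(3).runToList()                     // an iterating flow
Flow.unfold(0)(s => if s < 3 then Some((s, s + 1)) else None)  // unfold a function
  .runToList()                                                 // List(0, 1, 2)
----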
2267[info] ChannelTest:
2268[info] channel with capacity 0
2269[info] - should send and receive two spaced elements
2270[info] - should send and receive many elements, with concurrent senders & receivers
2271[info] - should select from two receives, if the last one has elements
2272[info] - should select from three receives, if the last one has elements
2273[info] - should select a receive from multiple channels
2274[info] - should select a receive until all channels are done
2275[info] - should properly report channel state
2276[info] - should select from a non-done channel, if a value is immediately available
2277[info] - should select a done channel, when the channel is done immediately
2278[info] - should select a done channel, when the channel becomes done
2279[info] channel with capacity 1
2280[info] - should send and receive two spaced elements
2281[info] - should send and receive many elements, with concurrent senders & receivers
2282[info] - should select from two receives, if the last one has elements
2283[info] - should select from three receives, if the last one has elements
2284[info] - should select a receive from multiple channels
2285[info] - should select a receive until all channels are done
2286[info] - should properly report channel state
2287[info] - should select from a non-done channel, if a value is immediately available
2288[info] - should select a done channel, when the channel is done immediately
2289[info] - should select a done channel, when the channel becomes done
2290[info] channel with capacity 2
2291[info] - should send and receive two spaced elements
2292[info] - should send and receive many elements, with concurrent senders & receivers
2293[info] - should select from two receives, if the last one has elements
2294[info] - should select from three receives, if the last one has elements
2295[info] - should select a receive from multiple channels
2296[info] - should select a receive until all channels are done
2297[info] - should properly report channel state
2298[info] - should select from a non-done channel, if a value is immediately available
2299[info] - should select a done channel, when the channel is done immediately
2300[info] - should select a done channel, when the channel becomes done
2301[info] channel with capacity 100
2302[info] - should send and receive two spaced elements
2303[info] - should send and receive many elements, with concurrent senders & receivers
2304[info] - should select from two receives, if the last one has elements
2305[info] - should select from three receives, if the last one has elements
2306[info] - should select a receive from multiple channels
2307[info] - should select a receive until all channels are done
2308[info] - should properly report channel state
2309[info] - should select from a non-done channel, if a value is immediately available
2310[info] - should select a done channel, when the channel is done immediately
2311[info] - should select a done channel, when the channel becomes done
2312[info] channel with capacity 10000
2313[info] - should send and receive two spaced elements
2314[info] - should send and receive many elements, with concurrent senders & receivers
2315[info] - should select from two receives, if the last one has elements
2316[info] - should select from three receives, if the last one has elements
2317[info] - should select a receive from multiple channels
2318[info] - should select a receive until all channels are done
2319[info] - should properly report channel state
2320[info] - should select from a non-done channel, if a value is immediately available
2321[info] - should select a done channel, when the channel is done immediately
2322[info] - should select a done channel, when the channel becomes done
2323[info] buffered channel
2324[info] - should select a send when one is available
2325[info] channel
2326[info] - should receive from a channel until done
2327[info] - should not receive from a channel in case of an error
2328[info] rendezvous channel
2329[info] - should wait until elements are transmitted
2330[info] - should select a send when a receive is waiting
2331[info] - should select a send or receive depending on availability
2332[info] default
2333[info] - should use the default value if the clauses are not satisfiable
2334[info] - should not use the default value if a clause is satisfiable
2335[info] - should not use the default value if the channel is done
2336[info] - should use the default value once a source is done (buffered channel, stress test)
2337[info] + iteration 1
2338[info] + iteration 2
2339[info] + iteration 3
2340[info] + iteration 4
2341[info] + iteration 5
2342[info] + iteration 6
2343[info] + iteration 7
2344[info] + iteration 8
2345[info] + iteration 9
2346[info] + iteration 10
2347[info] + iteration 11
2348[info] + iteration 12
2349[info] + iteration 13
2350[info] + iteration 14
2351[info] + iteration 15
2352[info] + iteration 16
2353[info] + iteration 17
2354[info] + iteration 18
2355[info] + iteration 19
2356[info] + iteration 20
2357[info] + iteration 21
2358[info] + iteration 22
2359[info] + iteration 23
2360[info] + iteration 24
2361[info] + iteration 25
2362[info] + iteration 26
2363[info] + iteration 27
2364[info] + iteration 28
2365[info] + iteration 29
2366[info] + iteration 30
2367[info] + iteration 31
2368[info] + iteration 32
2369[info] + iteration 33
2370[info] + iteration 34
2371[info] + iteration 35
2372[info] + iteration 36
2373[info] + iteration 37
2374[info] + iteration 38
2375[info] + iteration 39
2376[info] + iteration 40
2377[info] + iteration 41
2378[info] + iteration 42
2379[info] + iteration 43
2380[info] + iteration 44
2381[info] + iteration 45
2382[info] + iteration 46
2383[info] + iteration 47
2384[info] + iteration 48
2385[info] + iteration 49
2386[info] + iteration 50
2387[info] + iteration 51
2388[info] + iteration 52
2389[info] + iteration 53
2390[info] + iteration 54
2391[info] + iteration 55
2392[info] + iteration 56
2393[info] + iteration 57
2394[info] + iteration 58
2395[info] + iteration 59
2396[info] + iteration 60
2397[info] + iteration 61
2398[info] + iteration 62
2399[info] + iteration 63
2400[info] + iteration 64
2401[info] + iteration 65
2402[info] + iteration 66
2403[info] + iteration 67
2404[info] + iteration 68
2405[info] + iteration 69
2406[info] + iteration 70
2407[info] + iteration 71
2408[info] + iteration 72
2409[info] + iteration 73
2410[info] + iteration 74
2411[info] + iteration 75
2412[info] + iteration 76
2413[info] + iteration 77
2414[info] + iteration 78
2415[info] + iteration 79
2416[info] + iteration 80
2417[info] + iteration 81
2418[info] + iteration 82
2419[info] + iteration 83
2420[info] + iteration 84
2421[info] + iteration 85
2422[info] + iteration 86
2423[info] + iteration 87
2424[info] + iteration 88
2425[info] + iteration 89
2426[info] + iteration 90
2427[info] + iteration 91
2428[info] + iteration 92
2429[info] + iteration 93
2430[info] + iteration 94
2431[info] + iteration 95
2432[info] + iteration 96
2433[info] + iteration 97
2434[info] + iteration 98
2435[info] + iteration 99
2436[info] + iteration 100
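ChannelTest runs the same assertions for a rendezvous channel (capacity 0) and buffered channels of growing capacities, plus select with receive, send and default clauses. A minimal sketch of the send/receive/select vocabulary used above, assuming the ox.channels API:
----
import ox.*
import ox.channels.*

supervised {
  val c1 = Channel.buffered[Int](2)  // capacity-2 channel, as in the suites above
  val c2 = Channel.rendezvous[Int]
  fork { c1.send(1); c1.done() }
  select(c1.receiveClause, c2.receiveClause) match
    case c1.Received(v) => println(s"from c1: $v")
    case c2.Received(v) => println(s"from c2: $v")
}
----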
2437[info] FlowOpsOrElseTest:
2438[info] orElse
2439[info] - should emit elements only from the original source when it is not empty
2440[info] - should emit elements only from the alternative source when the original source is created empty
2441[info] - should emit elements only from the alternative source when the original source is empty
2442[info] - should return failed source when the original source is failed
2443[info] FlowOpsForeachTest:
2444[info] foreach
2445[info] - should iterate over a flow
2446[info] - should convert flow to a list
2447[info] SourceOpsEmptyTest:
2448[info] Source.empty
2449[info] - should be done
2450[info] - should be empty
2451[info] FlowOpsFlattenParTest:
2452[info] flattenPar
2453[info] - should pipe all elements of the child flows into the output flow
2454[info] - should handle empty flow
2455[info] - should handle singleton flow
2456[info] - should not flatten nested flows
2457[info] - should handle subsequent flatten calls
2458[info] - should run at most parallelism child flows
2459[info] - should pipe elements in real time
2460[info] - should propagate error of any of the child flows and stop piping
2461[info] - should propagate error of the parent flow and stop piping
2462[info] FlowOpsRetryTest:
2463[info] Flow.retry
2464[info] - should successfully run a flow without retries when no errors occur
2465[info] - should retry a failing flow with immediate schedule
2466[info] - should retry a failing flow with fixed interval schedule
2467[info] - should not retry a flow which fails downstream
2468[info] - should fail after exhausting all retry attempts
2469[info] - should use custom ResultPolicy to determine retry worthiness
2470[info] - should handle empty flows correctly
2471[info] - should handle flows that complete successfully on first attempt
2472[info] - should retry the entire flow when processing fails
2473[info] - should work with complex flows containing transformations
2474[info] - should not retry a flow which uses .take and control exceptions
2475[info] LocalTest:
2476[info] fork locals
2477[2026-01-08T00:32:07.623006064Z] [24] main mid
2478[2026-01-08T00:32:07.723803266Z] [313242] In f1 = x
2479[2026-01-08T00:32:07.724117354Z] [24] result = a
2480[2026-01-08T00:32:07.824052968Z] [313245] In f3 = z
2481[2026-01-08T00:32:07.824395042Z] [24] result = a
2482[info] - should properly propagate values using supervisedWhere
2483[2026-01-08T00:32:07.825753340Z] [24] main mid
2484[2026-01-08T00:32:07.926193175Z] [313246] In f1 = x
2485[2026-01-08T00:32:07.926474650Z] [24] result = a
2486[2026-01-08T00:32:08.026873770Z] [313248] In f3 = z
2487[2026-01-08T00:32:08.027163999Z] [24] result = a
2488[info] - should properly propagate values using unsupervisedWhere
2489[2026-01-08T00:32:08.028737269Z] [313250] nested1 = x
2490[2026-01-08T00:32:08.029331778Z] [313251] nested2 = x
2491[2026-01-08T00:32:08.029511077Z] [24] outer = a
2492[info] - should propagate values across multiple scopes
2493[info] - should propagate errors from forks created within local values
2494[2026-01-08T00:32:08.031370347Z] [24] v1
2495[2026-01-08T00:32:08.031666228Z] [24] v2
2496[2026-01-08T00:32:08.031790369Z] [24] RuntimeException
2497[2026-01-08T00:32:08.031845972Z] [24] v1
2498[info] - should correctly set & unset fork locals when an exception is thrown
2499[2026-01-08T00:32:08.032334141Z] [24] v1_1
2500[2026-01-08T00:32:08.032439817Z] [24] v2_1
2501[2026-01-08T00:32:08.032927505Z] [24] v1_2
2502[2026-01-08T00:32:08.032989150Z] [24] v2_2
2503[2026-01-08T00:32:08.033054241Z] [24] v1_1
2504[2026-01-08T00:32:08.033104214Z] [24] v2_1
2505[info] - should correctly set & unset multiple fork locals
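LocalTest covers ForkLocal values: scoped, fork-propagated replacements for thread-locals. A sketch of supervisedWhere, whose name appears in the output above; get() is assumed to read the current value:
----
import ox.*

val v = ForkLocal("a")
supervised {
  v.supervisedWhere("x") {
    fork { v.get() }.join()  // "x": forks in the nested scope see the new value
  }
  v.get()                    // "a" again outside supervisedWhere
}
----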
2506[info] FlowOpsSampleTest:
2507[info] sample
2508[info] - should not sample anything from an empty flow
2509[info] - should not sample anything when 'n == 0'
2510[info] - should sample every element of the flow when 'n == 1'
2511[info] - should sample every nth element of the flow
2512[info] FlowOpsDrainTest:
2513[info] drain
2514[info] - should drain all elements
2515[info] - should run any side-effects that are part of the flow
2516[info] - should merge with another flow
2517[info] ActorTest:
2518[info] - should invoke methods on the actor
2519[info] - should protect the internal state of the actor
2520[info] - should run the close callback before re-throwing the exception
2521[info] - should end the scope when an exception is thrown when handling .tell
2522[info] - should throw a channel closed exception when the actor's scope becomes closed
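ActorTest exercises ox's actor wrapper, which serialises calls on a plain object through a channel so its state is confined to a single fork. A sketch assuming the documented Actor.create/ask API (tell, mentioned above, is the fire-and-forget variant):
----
import ox.*
import ox.channels.*

class Counter:
  private var n = 0  // mutated only inside the actor's fork
  def inc(): Int = { n += 1; n }

supervised {
  val ref = Actor.create(new Counter)
  ref.ask(_.inc())  // 1
  ref.ask(_.inc())  // 2: calls are processed sequentially
}
----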
2523[info] FlowOpsSlidingTest:
2524[info] sliding
2525[info] - should create sliding windows for n = 2 and step = 1
2526[info] - should create sliding windows for n = 3 and step = 1
2527[info] - should create sliding windows for n = 2 and step = 2
2528[info] - should create sliding windows for n = 3 and step = 2
2529[info] - should create sliding windows for n = 1 and step = 2
2530[info] - should create sliding windows for n = 2 and step = 3
2531[info] - should create sliding windows for n = 2 and step = 3 (with 1 element remaining in the end)
2532[info] - should return failed source when the original source is failed
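sliding, tested above for the various n/step combinations, groups elements into windows of size n advanced by step elements. A sketch assuming sliding(n, step):
----
import ox.flow.Flow

Flow.fromValues(1, 2, 3, 4).sliding(2, 1).runToList()
// per the n = 2, step = 1 case above: windows (1,2), (2,3), (3,4)
----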
2533[info] FlowOpsTickTest:
2534[info] - should tick regularly
2535[info] - should tick immediately in case of a slow consumer, and then resume normally
2536Starting build for ProjectRef(file:/build/repo/,cron) (cron)... [3/6]
2537Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2538[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.8.0-RC6/classes ...
2539[info] done compiling
2540[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.8.0-RC6/test-classes ...
2541[info] done compiling
2542[info] CronScheduleTest:
2543[info] repeat with cron schedule
2544[info] - should repeat a function every second (once)
2545[info] - should repeat a function every second (three times)
2546[info] - should provide initial delay
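The cron module's tests repeat a function according to a cron schedule, with an optional initial delay. A purely illustrative sketch: the CronSchedule and repeat/RepeatConfig names below are assumptions based on the ox cron docs and may not match this version:
----
import ox.scheduling.*
import ox.scheduling.cron.*

// hypothetical names: build a schedule from a cron expression and repeat on it
val everySecond = CronSchedule.unsafeFromString("* * * ? * *")
repeat(RepeatConfig(everySecond))(println("tick"))
----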
2547Starting build for ProjectRef(file:/build/repo/,otelContext) (otel-context)... [4/6]
2548Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2549[info] compiling 1 Scala source to /build/repo/otel-context/target/scala-3.8.0-RC6/classes ...
2550[info] done compiling
2551Starting build for ProjectRef(file:/build/repo/,kafka) (kafka)... [5/6]
2552Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2553[info] compiling 9 Scala sources to /build/repo/kafka/target/scala-3.8.0-RC6/classes ...
2554[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaConsumerWrapper.scala:45:14
2555[warn] 45 | def close(wrapper: KafkaConsumerWrapper[K, V]): Unit = if closeWhenComplete then
2556[warn] | ^^^^^^^
2557[warn] | unused explicit parameter
2558[warn] one warning found
2559[info] done compiling
2560[info] compiling 6 Scala sources to /build/repo/kafka/target/scala-3.8.0-RC6/test-classes ...
2561[info] done compiling
256201:32:20.000 [pool-67-thread-9] INFO o.a.k.c.c.AbstractConfig - KafkaConfig values:
2563 add.partitions.to.txn.retry.backoff.max.ms = 100
2564 add.partitions.to.txn.retry.backoff.ms = 20
2565 advertised.listeners = BROKER://localhost:6001
2566 alter.config.policy.class.name = null
2567 alter.log.dirs.replication.quota.window.num = 11
2568 alter.log.dirs.replication.quota.window.size.seconds = 1
2569 authorizer.class.name =
2570 auto.create.topics.enable = true
2571 auto.leader.rebalance.enable = true
2572 background.threads = 10
2573 broker.heartbeat.interval.ms = 2000
2574 broker.id = 0
2575 broker.rack = null
2576 broker.session.timeout.ms = 9000
2577 client.quota.callback.class = null
2578 compression.gzip.level = -1
2579 compression.lz4.level = 9
2580 compression.type = producer
2581 compression.zstd.level = 3
2582 connection.failed.authentication.delay.ms = 100
2583 connections.max.idle.ms = 600000
2584 connections.max.reauth.ms = 0
2585 controlled.shutdown.enable = true
2586 controller.listener.names = CONTROLLER
2587 controller.performance.always.log.threshold.ms = 2000
2588 controller.performance.sample.period.ms = 60000
2589 controller.quorum.append.linger.ms = 25
2590 controller.quorum.bootstrap.servers = []
2591 controller.quorum.election.backoff.max.ms = 1000
2592 controller.quorum.election.timeout.ms = 1000
2593 controller.quorum.fetch.timeout.ms = 2000
2594 controller.quorum.request.timeout.ms = 2000
2595 controller.quorum.retry.backoff.ms = 20
2596 controller.quorum.voters = [0@localhost:6002]
2597 controller.quota.window.num = 11
2598 controller.quota.window.size.seconds = 1
2599 controller.socket.timeout.ms = 30000
2600 create.topic.policy.class.name = null
2601 default.replication.factor = 1
2602 delegation.token.expiry.check.interval.ms = 3600000
2603 delegation.token.expiry.time.ms = 86400000
2604 delegation.token.max.lifetime.ms = 604800000
2605 delegation.token.secret.key = null
2606 delete.records.purgatory.purge.interval.requests = 1
2607 delete.topic.enable = true
2608 early.start.listeners = null
2609 fetch.max.bytes = 57671680
2610 fetch.purgatory.purge.interval.requests = 1000
2611 group.consumer.assignors = [uniform, range]
2612 group.consumer.heartbeat.interval.ms = 5000
2613 group.consumer.max.heartbeat.interval.ms = 15000
2614 group.consumer.max.session.timeout.ms = 60000
2615 group.consumer.max.size = 2147483647
2616 group.consumer.migration.policy = bidirectional
2617 group.consumer.min.heartbeat.interval.ms = 5000
2618 group.consumer.min.session.timeout.ms = 45000
2619 group.consumer.regex.refresh.interval.ms = 600000
2620 group.consumer.session.timeout.ms = 45000
2621 group.coordinator.append.linger.ms = 5
2622 group.coordinator.rebalance.protocols = [classic, consumer, streams]
2623 group.coordinator.threads = 4
2624 group.initial.rebalance.delay.ms = 3000
2625 group.max.session.timeout.ms = 1800000
2626 group.max.size = 2147483647
2627 group.min.session.timeout.ms = 6000
2628 group.share.assignors = [simple]
2629 group.share.delivery.count.limit = 5
2630 group.share.enable = false
2631 group.share.heartbeat.interval.ms = 5000
2632 group.share.max.heartbeat.interval.ms = 15000
2633 group.share.max.record.lock.duration.ms = 60000
2634 group.share.max.session.timeout.ms = 60000
2635 group.share.max.share.sessions = 2000
2636 group.share.max.size = 200
2637 group.share.min.heartbeat.interval.ms = 5000
2638 group.share.min.record.lock.duration.ms = 15000
2639 group.share.min.session.timeout.ms = 45000
2640 group.share.partition.max.record.locks = 2000
2641 group.share.persister.class.name = org.apache.kafka.server.share.persister.DefaultStatePersister
2642 group.share.record.lock.duration.ms = 30000
2643 group.share.session.timeout.ms = 45000
2644 group.streams.heartbeat.interval.ms = 5000
2645 group.streams.max.heartbeat.interval.ms = 15000
2646 group.streams.max.session.timeout.ms = 60000
2647 group.streams.max.size = 2147483647
2648 group.streams.max.standby.replicas = 2
2649 group.streams.min.heartbeat.interval.ms = 5000
2650 group.streams.min.session.timeout.ms = 45000
2651 group.streams.num.standby.replicas = 0
2652 group.streams.session.timeout.ms = 45000
2653 initial.broker.registration.timeout.ms = 60000
2654 inter.broker.listener.name = BROKER
2655 internal.metadata.delete.delay.millis = 60000
2656 internal.metadata.log.segment.bytes = null
2657 internal.metadata.max.batch.size.in.bytes = 8388608
2658 internal.metadata.max.fetch.size.in.bytes = 8388608
2659 kafka.metrics.polling.interval.secs = 10
2660 kafka.metrics.reporters = []
2661 leader.imbalance.check.interval.seconds = 300
2662 listener.security.protocol.map = BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT
2663 listeners = BROKER://localhost:6001,CONTROLLER://localhost:6002
2664 log.cleaner.backoff.ms = 15000
2665 log.cleaner.dedupe.buffer.size = 1048577
2666 log.cleaner.delete.retention.ms = 86400000
2667 log.cleaner.enable = true
2668 log.cleaner.io.buffer.load.factor = 0.9
2669 log.cleaner.io.buffer.size = 524288
2670 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
2671 log.cleaner.max.compaction.lag.ms = 9223372036854775807
2672 log.cleaner.min.cleanable.ratio = 0.5
2673 log.cleaner.min.compaction.lag.ms = 0
2674 log.cleaner.threads = 1
2675 log.cleanup.policy = [delete]
2676 log.dir = /tmp/kafka-logs
2677 log.dir.failure.timeout.ms = 30000
2678 log.dirs = /tmp/kafka-logs15769196062054598040
2679 log.flush.interval.messages = 1
2680 log.flush.interval.ms = null
2681 log.flush.offset.checkpoint.interval.ms = 60000
2682 log.flush.scheduler.interval.ms = 9223372036854775807
2683 log.flush.start.offset.checkpoint.interval.ms = 60000
2684 log.index.interval.bytes = 4096
2685 log.index.size.max.bytes = 10485760
2686 log.initial.task.delay.ms = 30000
2687 log.local.retention.bytes = -2
2688 log.local.retention.ms = -2
2689 log.message.timestamp.after.max.ms = 3600000
2690 log.message.timestamp.before.max.ms = 9223372036854775807
2691 log.message.timestamp.type = CreateTime
2692 log.preallocate = false
2693 log.retention.bytes = -1
2694 log.retention.check.interval.ms = 300000
2695 log.retention.hours = 168
2696 log.retention.minutes = null
2697 log.retention.ms = null
2698 log.roll.hours = 168
2699 log.roll.jitter.hours = 0
2700 log.roll.jitter.ms = null
2701 log.roll.ms = null
2702 log.segment.bytes = 1073741824
2703 log.segment.delete.delay.ms = 60000
2704 max.connection.creation.rate = 2147483647
2705 max.connections = 2147483647
2706 max.connections.per.ip = 2147483647
2707 max.connections.per.ip.overrides =
2708 max.incremental.fetch.session.cache.slots = 1000
2709 max.request.partition.size.limit = 2000
2710 message.max.bytes = 1048588
2711 metadata.log.dir = null
2712 metadata.log.max.record.bytes.between.snapshots = 20971520
2713 metadata.log.max.snapshot.interval.ms = 3600000
2714 metadata.log.segment.bytes = 1073741824
2715 metadata.log.segment.ms = 604800000
2716 metadata.max.idle.interval.ms = 500
2717 metadata.max.retention.bytes = 104857600
2718 metadata.max.retention.ms = 604800000
2719 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
2720 metrics.num.samples = 2
2721 metrics.recording.level = INFO
2722 metrics.sample.window.ms = 30000
2723 min.insync.replicas = 1
2724 node.id = 0
2725 num.io.threads = 8
2726 num.network.threads = 3
2727 num.partitions = 1
2728 num.recovery.threads.per.data.dir = 2
2729 num.replica.alter.log.dirs.threads = null
2730 num.replica.fetchers = 1
2731 offset.metadata.max.bytes = 4096
2732 offsets.commit.timeout.ms = 5000
2733 offsets.load.buffer.size = 5242880
2734 offsets.retention.check.interval.ms = 600000
2735 offsets.retention.minutes = 10080
2736 offsets.topic.compression.codec = 0
2737 offsets.topic.num.partitions = 1
2738 offsets.topic.replication.factor = 1
2739 offsets.topic.segment.bytes = 104857600
2740 principal.builder.class = class org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
2741 process.roles = [broker, controller]
2742 producer.id.expiration.check.interval.ms = 600000
2743 producer.id.expiration.ms = 86400000
2744 producer.purgatory.purge.interval.requests = 1000
2745 queued.max.request.bytes = -1
2746 queued.max.requests = 500
2747 quota.window.num = 11
2748 quota.window.size.seconds = 1
2749 remote.fetch.max.wait.ms = 500
2750 remote.list.offsets.request.timeout.ms = 30000
2751 remote.log.index.file.cache.total.size.bytes = 1073741824
2752 remote.log.manager.copier.thread.pool.size = 10
2753 remote.log.manager.copy.max.bytes.per.second = 9223372036854775807
2754 remote.log.manager.copy.quota.window.num = 11
2755 remote.log.manager.copy.quota.window.size.seconds = 1
2756 remote.log.manager.expiration.thread.pool.size = 10
2757 remote.log.manager.fetch.max.bytes.per.second = 9223372036854775807
2758 remote.log.manager.fetch.quota.window.num = 11
2759 remote.log.manager.fetch.quota.window.size.seconds = 1
2760 remote.log.manager.task.interval.ms = 30000
2761 remote.log.manager.task.retry.backoff.max.ms = 30000
2762 remote.log.manager.task.retry.backoff.ms = 500
2763 remote.log.manager.task.retry.jitter = 0.2
2764 remote.log.manager.thread.pool.size = 2
2765 remote.log.metadata.custom.metadata.max.bytes = 128
2766 remote.log.metadata.manager.class.name = org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager
2767 remote.log.metadata.manager.class.path = null
2768 remote.log.metadata.manager.impl.prefix = rlmm.config.
2769 remote.log.metadata.manager.listener.name = null
2770 remote.log.reader.max.pending.tasks = 100
2771 remote.log.reader.threads = 10
2772 remote.log.storage.manager.class.name = null
2773 remote.log.storage.manager.class.path = null
2774 remote.log.storage.manager.impl.prefix = rsm.config.
2775 remote.log.storage.system.enable = false
2776 replica.fetch.backoff.ms = 1000
2777 replica.fetch.max.bytes = 1048576
2778 replica.fetch.min.bytes = 1
2779 replica.fetch.response.max.bytes = 10485760
2780 replica.fetch.wait.max.ms = 500
2781 replica.high.watermark.checkpoint.interval.ms = 5000
2782 replica.lag.time.max.ms = 30000
2783 replica.selector.class = null
2784 replica.socket.receive.buffer.bytes = 65536
2785 replica.socket.timeout.ms = 30000
2786 replication.quota.window.num = 11
2787 replication.quota.window.size.seconds = 1
2788 request.timeout.ms = 30000
2789 sasl.client.callback.handler.class = null
2790 sasl.enabled.mechanisms = [GSSAPI]
2791 sasl.jaas.config = null
2792 sasl.kerberos.kinit.cmd = /usr/bin/kinit
2793 sasl.kerberos.min.time.before.relogin = 60000
2794 sasl.kerberos.principal.to.local.rules = [DEFAULT]
2795 sasl.kerberos.service.name = null
2796 sasl.kerberos.ticket.renew.jitter = 0.05
2797 sasl.kerberos.ticket.renew.window.factor = 0.8
2798 sasl.login.callback.handler.class = null
2799 sasl.login.class = null
2800 sasl.login.connect.timeout.ms = null
2801 sasl.login.read.timeout.ms = null
2802 sasl.login.refresh.buffer.seconds = 300
2803 sasl.login.refresh.min.period.seconds = 60
2804 sasl.login.refresh.window.factor = 0.8
2805 sasl.login.refresh.window.jitter = 0.05
2806 sasl.login.retry.backoff.max.ms = 10000
2807 sasl.login.retry.backoff.ms = 100
2808 sasl.mechanism.controller.protocol = GSSAPI
2809 sasl.mechanism.inter.broker.protocol = GSSAPI
2810 sasl.oauthbearer.assertion.algorithm = RS256
2811 sasl.oauthbearer.assertion.claim.aud = null
2812 sasl.oauthbearer.assertion.claim.exp.seconds = 300
2813 sasl.oauthbearer.assertion.claim.iss = null
2814 sasl.oauthbearer.assertion.claim.jti.include = false
2815 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
2816 sasl.oauthbearer.assertion.claim.sub = null
2817 sasl.oauthbearer.assertion.file = null
2818 sasl.oauthbearer.assertion.private.key.file = null
2819 sasl.oauthbearer.assertion.private.key.passphrase = null
2820 sasl.oauthbearer.assertion.template.file = null
2821 sasl.oauthbearer.client.credentials.client.id = null
2822 sasl.oauthbearer.client.credentials.client.secret = null
2823 sasl.oauthbearer.clock.skew.seconds = 30
2824 sasl.oauthbearer.expected.audience = null
2825 sasl.oauthbearer.expected.issuer = null
2826 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
2827 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
2828 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
2829 sasl.oauthbearer.jwks.endpoint.url = null
2830 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
2831 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
2832 sasl.oauthbearer.scope = null
2833 sasl.oauthbearer.scope.claim.name = scope
2834 sasl.oauthbearer.sub.claim.name = sub
2835 sasl.oauthbearer.token.endpoint.url = null
2836 sasl.server.callback.handler.class = null
2837 sasl.server.max.receive.size = 524288
2838 security.inter.broker.protocol = PLAINTEXT
2839 security.providers = null
2840 server.max.startup.time.ms = 9223372036854775807
2841 share.coordinator.append.linger.ms = 5
2842 share.coordinator.cold.partition.snapshot.interval.ms = 300000
2843 share.coordinator.load.buffer.size = 5242880
2844 share.coordinator.snapshot.update.records.per.snapshot = 500
2845 share.coordinator.state.topic.compression.codec = 0
2846 share.coordinator.state.topic.min.isr = 2
2847 share.coordinator.state.topic.num.partitions = 50
2848 share.coordinator.state.topic.prune.interval.ms = 300000
2849 share.coordinator.state.topic.replication.factor = 3
2850 share.coordinator.state.topic.segment.bytes = 104857600
2851 share.coordinator.threads = 1
2852 share.coordinator.write.timeout.ms = 5000
2853 share.fetch.purgatory.purge.interval.requests = 1000
2854 socket.connection.setup.timeout.max.ms = 30000
2855 socket.connection.setup.timeout.ms = 10000
2856 socket.listen.backlog.size = 50
2857 socket.receive.buffer.bytes = 102400
2858 socket.request.max.bytes = 104857600
2859 socket.send.buffer.bytes = 102400
2860 ssl.allow.dn.changes = false
2861 ssl.allow.san.changes = false
2862 ssl.cipher.suites = []
2863 ssl.client.auth = none
2864 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
2865 ssl.endpoint.identification.algorithm = https
2866 ssl.engine.factory.class = null
2867 ssl.key.password = null
2868 ssl.keymanager.algorithm = SunX509
2869 ssl.keystore.certificate.chain = null
2870 ssl.keystore.key = null
2871 ssl.keystore.location = null
2872 ssl.keystore.password = null
2873 ssl.keystore.type = JKS
2874 ssl.principal.mapping.rules = DEFAULT
2875 ssl.protocol = TLSv1.3
2876 ssl.provider = null
2877 ssl.secure.random.implementation = null
2878 ssl.trustmanager.algorithm = PKIX
2879 ssl.truststore.certificates = null
2880 ssl.truststore.location = null
2881 ssl.truststore.password = null
2882 ssl.truststore.type = JKS
2883 telemetry.max.bytes = 1048576
2884 transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
2885 transaction.max.timeout.ms = 900000
2886 transaction.partition.verification.enable = true
2887 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
2888 transaction.state.log.load.buffer.size = 5242880
2889 transaction.state.log.min.isr = 1
2890 transaction.state.log.num.partitions = 50
2891 transaction.state.log.replication.factor = 1
2892 transaction.state.log.segment.bytes = 104857600
2893 transaction.two.phase.commit.enable = false
2894 transactional.id.expiration.ms = 604800000
2895 unclean.leader.election.enable = false
2896 unclean.leader.election.interval.ms = 300000
2897 unstable.api.versions.enable = false
2898 unstable.feature.versions.enable = false
2899
290001:32:20.209 [pool-67-thread-9] INFO k.u.Log4jControllerRegistration$ - Registered `kafka:type=kafka.Log4jController` MBean
290101:32:20.265 [pool-67-thread-9] INFO i.g.e.EmbeddedKafka$ - [KafkaRaftServer nodeId=0] Rewriting /tmp/kafka-logs15769196062054598040/meta.properties
290201:32:20.324 [pool-67-thread-9] INFO k.s.ControllerServer - [ControllerServer id=0] Starting controller
290301:32:20.642 [pool-67-thread-9] INFO k.n.ConnectionQuotas - Updated connection-accept-rate max connection creation rate to 2147483647
290401:32:20.670 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(CONTROLLER)
290501:32:20.675 [pool-67-thread-9] INFO o.a.k.s.n.EndpointReadyFutures - authorizerStart completed for endpoint CONTROLLER. Endpoint is now READY.
290601:32:20.676 [pool-67-thread-9] INFO k.s.SharedServer - [SharedServer id=0] Starting SharedServer
290701:32:20.723 [pool-67-thread-9] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
290801:32:20.724 [pool-67-thread-9] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs15769196062054598040] Reloading from producer snapshot and rebuilding producer state from offset 0
290901:32:20.724 [pool-67-thread-9] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs15769196062054598040] Producer state recovery took 0ms for snapshot load and 0ms for segment recovery from offset 0
291001:32:20.742 [pool-67-thread-9] INFO k.r.KafkaMetadataLog$ - Initialized snapshots with IDs SortedSet() from /tmp/kafka-logs15769196062054598040/__cluster_metadata-0
291101:32:20.752 [raft-expiration-reaper] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Starting
291201:32:20.764 [pool-67-thread-9] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Reading KRaft snapshot and log as part of the initialization
291301:32:20.766 [pool-67-thread-9] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Starting voters are VoterSet(voters={0=VoterNode(voterKey=ReplicaKey(id=0, directoryId=<undefined>), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/127.0.0.1:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:0])})
291401:32:20.769 [pool-67-thread-9] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Starting request manager with static voters: [localhost:6002 (id: 0 rack: null isFenced: false)]
291501:32:20.773 [pool-67-thread-9] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1397, highWatermark=Optional.empty) from null
291601:32:20.848 [pool-67-thread-9] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1397, highWatermark=Optional.empty) from null
291701:32:20.851 [pool-67-thread-9] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1508, highWatermark=Optional.empty) from UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1397, highWatermark=Optional.empty)
291801:32:20.852 [pool-67-thread-9] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to CandidateState(localId=0, localDirectoryId=lLBEpjnhyLJNV8aO2iBtcg, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1171) from ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1508, highWatermark=Optional.empty)
291901:32:20.858 [pool-67-thread-9] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to CandidateState(localId=0, localDirectoryId=lLBEpjnhyLJNV8aO2iBtcg, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1171) from ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1508, highWatermark=Optional.empty)
292001:32:20.863 [pool-67-thread-9] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to Leader(localVoterNode=VoterNode(voterKey=ReplicaKey(id=0, directoryId=lLBEpjnhyLJNV8aO2iBtcg), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/<unresolved>:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:1]), epoch=1, epochStartOffset=0, highWatermark=Optional.empty, voterStates={0=ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional.empty, lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)}) from CandidateState(localId=0, localDirectoryId=lLBEpjnhyLJNV8aO2iBtcg, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1171)
292101:32:20.864 [pool-67-thread-9] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to Leader(localVoterNode=VoterNode(voterKey=ReplicaKey(id=0, directoryId=lLBEpjnhyLJNV8aO2iBtcg), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/<unresolved>:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:1]), epoch=1, epochStartOffset=0, highWatermark=Optional.empty, voterStates={0=ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional.empty, lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)}) from CandidateState(localId=0, localDirectoryId=lLBEpjnhyLJNV8aO2iBtcg, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1171)
292201:32:20.881 [kafka-0-raft-outbound-request-thread] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Starting
292301:32:20.881 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Starting
292401:32:20.895 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] initializeNewPublishers: The loader is still catching up because we have loaded up to offset -1, but the high water mark is 1
292501:32:20.896 [pool-67-thread-9] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for controller quorum voters future
292601:32:20.896 [pool-67-thread-9] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for controller quorum voters future
292701:32:20.900 [kafka-0-raft-io-thread] INFO o.a.k.r.LeaderState - [RaftManager id=0] High watermark set to LogOffsetMetadata(offset=1, metadata=Optional[(segmentBaseOffset=0,relativePositionInSegment=91)]) for the first time for epoch 1 based on indexOfHw 0 and voters [ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional[LogOffsetMetadata(offset=1, metadata=Optional[(segmentBaseOffset=0,relativePositionInSegment=91)])], lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)]
292801:32:20.905 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Registered the listener org.apache.kafka.image.loader.MetadataLoader@1873605344
292901:32:20.906 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Setting the next offset of org.apache.kafka.image.loader.MetadataLoader@1873605344 to 0 since there are no snapshots
293001:32:20.908 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] maybePublishMetadata(LOG_DELTA): The loader is still catching up because we have not loaded a controller record as of offset 0 and high water mark is 1
293101:32:20.928 [pool-67-thread-9] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task writeNoOpRecord to run every 500 ms
293201:32:20.928 [pool-67-thread-9] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task maybeFenceStaleBroker to run every 1125 ms
293301:32:20.929 [pool-67-thread-9] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task electPreferred to run every 300000 ms
293401:32:20.929 [pool-67-thread-9] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task electUnclean to run every 300000 ms
293501:32:20.929 [pool-67-thread-9] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task expireDelegationTokens to run every 3600000 ms
293601:32:20.929 [pool-67-thread-9] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task generatePeriodicPerformanceMessage to run every 60000 ms
293701:32:20.930 [pool-67-thread-9] INFO o.a.k.c.QuorumController - [QuorumController id=0] Creating new QuorumController with clusterId 4oa31apqQtabsfPXH-H0RA
293801:32:20.930 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Registered the listener org.apache.kafka.controller.QuorumController$QuorumMetaLogListener@409757838
293901:32:20.930 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Setting the next offset of org.apache.kafka.controller.QuorumController$QuorumMetaLogListener@409757838 to 0 since there are no snapshots
294001:32:20.932 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] Becoming the active controller at epoch 1, next write offset 1.
294101:32:20.936 [controller-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Starting
294201:32:20.936 [controller-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Starting
294301:32:20.937 [quorum-controller-0-event-handler] WARN o.a.k.c.QuorumController - [QuorumController id=0] Performing controller activation. The metadata log appears to be empty. Appending 1 bootstrap record(s) in metadata transaction at metadata.version 4.1-IV1 from bootstrap source 'the default bootstrap'.
294401:32:20.938 [controller-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Starting
294501:32:20.939 [quorum-controller-0-event-handler] INFO o.a.k.c.OffsetControlManager - [QuorumController id=0] Replayed BeginTransactionRecord(name='Bootstrap records') at offset 1.
294601:32:20.940 [quorum-controller-0-event-handler] INFO o.a.k.c.FeatureControlManager - [QuorumController id=0] Replayed a FeatureLevelRecord setting metadata.version to 4.1-IV1
01:32:20.940 [quorum-controller-0-event-handler] INFO o.a.k.c.OffsetControlManager - [QuorumController id=0] Replayed EndTransactionRecord() at offset 3.
01:32:20.940 [controller-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Starting
01:32:20.941 [quorum-controller-0-event-handler] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Activated periodic tasks: electPreferred, electUnclean, expireDelegationTokens, generatePeriodicPerformanceMessage, maybeFenceStaleBroker, writeNoOpRecord
01:32:20.952 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Starting
01:32:20.965 [pool-67-thread-9] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for the controller metadata publishers to be installed
01:32:20.965 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] initializeNewPublishers: The loader is still catching up because we have not loaded a controller record as of offset 0 and high water mark is 1
01:32:20.965 [pool-67-thread-9] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for the controller metadata publishers to be installed
01:32:20.965 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Enabling request processing.
01:32:20.971 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] maybePublishMetadata(LOG_DELTA): The loader finished catching up to the current high water mark of 4
01:32:20.973 [pool-67-thread-9] INFO k.n.DataPlaneAcceptor - Awaiting socket connections on localhost:6002.
01:32:20.974 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing SnapshotGenerator with a snapshot at offset 3
01:32:20.974 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing KRaftMetadataCachePublisher with a snapshot at offset 3
01:32:20.974 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing FeaturesPublisher with a snapshot at offset 3
01:32:20.981 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Starting
01:32:20.981 [pool-67-thread-9] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for all of the authorizer futures to be completed
01:32:20.981 [pool-67-thread-9] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for all of the authorizer futures to be completed
01:32:20.981 [pool-67-thread-9] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for all of the SocketServer Acceptors to be started
01:32:20.982 [pool-67-thread-9] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for all of the SocketServer Acceptors to be started
01:32:20.982 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=E5FeePitQvWsne22hoaE1A] initialized channel manager.
01:32:20.982 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=E5FeePitQvWsne22hoaE1A] maybeSendControllerRegistration: cannot register yet because the metadata.version is not known yet.
01:32:20.982 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from SHUTDOWN to STARTING
01:32:20.982 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Starting broker
01:32:20.988 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
01:32:20.988 [kafka-0-metadata-loader-event-handler] INFO o.a.k.m.p.FeaturesPublisher - [ControllerServer id=0] Loaded new metadata FinalizedFeatures[metadataVersion=4.1-IV1, finalizedFeatures={metadata.version=27}, finalizedFeaturesEpoch=3].
01:32:20.988 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerRegistrationsPublisher with a snapshot at offset 3
01:32:20.988 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerRegistrationManager with a snapshot at offset 3
01:32:20.988 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicConfigPublisher controller id=0 with a snapshot at offset 3
01:32:20.989 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicClientQuotaPublisher controller id=0 with a snapshot at offset 3
01:32:20.991 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicTopicClusterQuotaPublisher controller id=0 with a snapshot at offset 3
01:32:20.992 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ScramPublisher controller id=0 with a snapshot at offset 3
01:32:20.992 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DelegationTokenPublisher controller id=0 with a snapshot at offset 3
01:32:20.994 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerMetadataMetricsPublisher with a snapshot at offset 3
01:32:20.994 [broker-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Starting
01:32:20.994 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing AclPublisher controller id=0 with a snapshot at offset 3
01:32:20.995 [broker-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Starting
01:32:20.997 [broker-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Starting
01:32:20.999 [broker-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Starting
01:32:20.999 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=E5FeePitQvWsne22hoaE1A] sendControllerRegistration: attempting to send ControllerRegistrationRequestData(controllerId=0, incarnationId=E5FeePitQvWsne22hoaE1A, zkMigrationReady=false, listeners=[Listener(name='CONTROLLER', host='localhost', port=6002, securityProtocol=0)], features=[Feature(name='group.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='transaction.version', minSupportedVersion=0, maxSupportedVersion=2), Feature(name='eligible.leader.replicas.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='kraft.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='metadata.version', minSupportedVersion=7, maxSupportedVersion=27), Feature(name='share.version', minSupportedVersion=0, maxSupportedVersion=1)])
01:32:21.019 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for controller quorum voters future
01:32:21.019 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for controller quorum voters future
01:32:21.023 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Starting
01:32:21.023 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
01:32:21.032 [client-metrics-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Starting
01:32:21.092 [pool-67-thread-9] INFO k.n.ConnectionQuotas - Updated connection-accept-rate max connection creation rate to 2147483647
01:32:21.098 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(BROKER)
01:32:21.102 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed RegisterControllerRecord containing ControllerRegistration(id=0, incarnationId=E5FeePitQvWsne22hoaE1A, zkMigrationReady=false, listeners=[Endpoint(listenerName='CONTROLLER', securityProtocol=PLAINTEXT, host='localhost', port=6002)], supportedFeatures={eligible.leader.replicas.version: 0-1, group.version: 0-1, kraft.version: 0-1, metadata.version: 7-27, share.version: 0-1, transaction.version: 0-2}).
01:32:21.104 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Starting
01:32:21.105 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
01:32:21.111 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Starting
01:32:21.111 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
01:32:21.116 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=E5FeePitQvWsne22hoaE1A] Our registration has been persisted to the metadata log.
01:32:21.116 [controller-0-to-controller-registration-channel-manager] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=E5FeePitQvWsne22hoaE1A] RegistrationResponseHandler: controller acknowledged ControllerRegistrationRequest.
01:32:21.127 [ExpirationReaper-0-Produce] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Starting
01:32:21.128 [ExpirationReaper-0-Fetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Starting
01:32:21.129 [ExpirationReaper-0-DeleteRecords] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Starting
01:32:21.129 [ExpirationReaper-0-RemoteFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Starting
01:32:21.130 [ExpirationReaper-0-RemoteListOffsets] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Starting
01:32:21.131 [ExpirationReaper-0-ShareFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Starting
01:32:21.145 [share-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Starting
01:32:21.167 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Starting
01:32:21.173 [persister-state-manager-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Starting
01:32:21.174 [PersisterStateManager] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Starting
01:32:21.175 [group-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Starting
01:32:21.186 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Starting
01:32:21.186 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Starting
01:32:21.188 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Starting
01:32:21.189 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Starting
01:32:21.208 [pool-67-thread-9] INFO k.l.LogManager - Unable to read the broker epoch in /tmp/kafka-logs15769196062054598040.
01:32:21.209 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Starting
01:32:21.211 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
01:32:21.214 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Incarnation Gcn3MmtLR3WalFXO7wSeuQ of broker 0 in cluster 4oa31apqQtabsfPXH-H0RA is now STARTING.
01:32:21.218 [share-group-lock-timeout-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Starting
01:32:21.222 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] No previous registration found for broker 0. New incarnation ID is Gcn3MmtLR3WalFXO7wSeuQ. Generated 0 record(s) to clean up previous incarnations. New broker epoch is 5.
01:32:21.229 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed initial RegisterBrokerRecord for broker 0: RegisterBrokerRecord(brokerId=0, isMigratingZkBroker=false, incarnationId=Gcn3MmtLR3WalFXO7wSeuQ, brokerEpoch=5, endPoints=[BrokerEndpoint(name='BROKER', host='localhost', port=6001, securityProtocol=0)], features=[BrokerFeature(name='group.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='transaction.version', minSupportedVersion=0, maxSupportedVersion=2), BrokerFeature(name='eligible.leader.replicas.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='kraft.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='metadata.version', minSupportedVersion=7, maxSupportedVersion=27), BrokerFeature(name='share.version', minSupportedVersion=0, maxSupportedVersion=1)], rack=null, fenced=true, inControlledShutdown=false, logDirs=[lLBEpjnhyLJNV8aO2iBtcg])
01:32:21.233 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Starting
01:32:21.246 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the broker metadata publishers to be installed
01:32:21.246 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the broker metadata publishers to be installed
01:32:21.246 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the controller to acknowledge that we are caught up
01:32:21.246 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing MetadataVersionPublisher(id=0) with a snapshot at offset 4
01:32:21.247 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing BrokerMetadataPublisher with a snapshot at offset 4
01:32:21.249 [kafka-0-metadata-loader-event-handler] INFO k.s.m.BrokerMetadataPublisher - [BrokerMetadataPublisher id=0] Publishing initial metadata at offset OffsetAndEpoch[offset=4, epoch=1] with metadata.version Optional[4.1-IV1].
01:32:21.250 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Loading logs from log dirs ArrayBuffer(/tmp/kafka-logs15769196062054598040)
01:32:21.253 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - No logs found to be loaded in /tmp/kafka-logs15769196062054598040
01:32:21.253 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Successfully registered broker 0 with broker epoch 5
01:32:21.260 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Loaded 0 logs in 9ms
01:32:21.260 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Starting log cleanup with a period of 300000 ms.
01:32:21.261 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Starting log flusher with a default period of 9223372036854775807 ms.
01:32:21.267 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.LogCleaner - Starting the log cleaner
01:32:21.272 [kafka-log-cleaner-thread-0] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Starting
01:32:21.276 [LogDirFailureHandler] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Starting
01:32:21.277 [AddPartitionsToTxnSenderThread-0] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Starting
01:32:21.280 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Starting up.
01:32:21.281 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Startup complete.
01:32:21.282 [kafka-0-metadata-loader-event-handler] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Starting up.
01:32:21.283 [kafka-0-metadata-loader-event-handler] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Startup complete.
01:32:21.283 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Starting up.
01:32:21.283 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Startup complete.
01:32:21.284 [TxnMarkerSenderThread-0] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Starting
01:32:21.291 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing BrokerRegistrationTracker(id=0) with a snapshot at offset 4
01:32:21.300 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker has caught up. Transitioning from STARTING to RECOVERY.
01:32:21.300 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the controller to acknowledge that we are caught up
01:32:21.300 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the initial broker metadata update to be published
01:32:21.300 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the initial broker metadata update to be published
01:32:21.301 [pool-67-thread-9] INFO o.a.k.c.c.AbstractConfig - KafkaConfig values:
    add.partitions.to.txn.retry.backoff.max.ms = 100
    add.partitions.to.txn.retry.backoff.ms = 20
    advertised.listeners = BROKER://localhost:6001
    alter.config.policy.class.name = null
    alter.log.dirs.replication.quota.window.num = 11
    alter.log.dirs.replication.quota.window.size.seconds = 1
    authorizer.class.name =
    auto.create.topics.enable = true
    auto.leader.rebalance.enable = true
    background.threads = 10
    broker.heartbeat.interval.ms = 2000
    broker.id = 0
    broker.rack = null
    broker.session.timeout.ms = 9000
    client.quota.callback.class = null
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = producer
    compression.zstd.level = 3
    connection.failed.authentication.delay.ms = 100
    connections.max.idle.ms = 600000
    connections.max.reauth.ms = 0
    controlled.shutdown.enable = true
    controller.listener.names = CONTROLLER
    controller.performance.always.log.threshold.ms = 2000
    controller.performance.sample.period.ms = 60000
    controller.quorum.append.linger.ms = 25
    controller.quorum.bootstrap.servers = []
    controller.quorum.election.backoff.max.ms = 1000
    controller.quorum.election.timeout.ms = 1000
    controller.quorum.fetch.timeout.ms = 2000
    controller.quorum.request.timeout.ms = 2000
    controller.quorum.retry.backoff.ms = 20
    controller.quorum.voters = [0@localhost:6002]
    controller.quota.window.num = 11
    controller.quota.window.size.seconds = 1
    controller.socket.timeout.ms = 30000
    create.topic.policy.class.name = null
    default.replication.factor = 1
    delegation.token.expiry.check.interval.ms = 3600000
    delegation.token.expiry.time.ms = 86400000
    delegation.token.max.lifetime.ms = 604800000
    delegation.token.secret.key = null
    delete.records.purgatory.purge.interval.requests = 1
    delete.topic.enable = true
    early.start.listeners = null
    fetch.max.bytes = 57671680
    fetch.purgatory.purge.interval.requests = 1000
    group.consumer.assignors = [uniform, range]
    group.consumer.heartbeat.interval.ms = 5000
    group.consumer.max.heartbeat.interval.ms = 15000
    group.consumer.max.session.timeout.ms = 60000
    group.consumer.max.size = 2147483647
    group.consumer.migration.policy = bidirectional
    group.consumer.min.heartbeat.interval.ms = 5000
    group.consumer.min.session.timeout.ms = 45000
    group.consumer.regex.refresh.interval.ms = 600000
    group.consumer.session.timeout.ms = 45000
    group.coordinator.append.linger.ms = 5
    group.coordinator.rebalance.protocols = [classic, consumer, streams]
    group.coordinator.threads = 4
    group.initial.rebalance.delay.ms = 3000
    group.max.session.timeout.ms = 1800000
    group.max.size = 2147483647
    group.min.session.timeout.ms = 6000
    group.share.assignors = [simple]
    group.share.delivery.count.limit = 5
    group.share.enable = false
    group.share.heartbeat.interval.ms = 5000
    group.share.max.heartbeat.interval.ms = 15000
    group.share.max.record.lock.duration.ms = 60000
    group.share.max.session.timeout.ms = 60000
    group.share.max.share.sessions = 2000
    group.share.max.size = 200
    group.share.min.heartbeat.interval.ms = 5000
    group.share.min.record.lock.duration.ms = 15000
    group.share.min.session.timeout.ms = 45000
    group.share.partition.max.record.locks = 2000
    group.share.persister.class.name = org.apache.kafka.server.share.persister.DefaultStatePersister
    group.share.record.lock.duration.ms = 30000
    group.share.session.timeout.ms = 45000
    group.streams.heartbeat.interval.ms = 5000
    group.streams.max.heartbeat.interval.ms = 15000
    group.streams.max.session.timeout.ms = 60000
    group.streams.max.size = 2147483647
    group.streams.max.standby.replicas = 2
    group.streams.min.heartbeat.interval.ms = 5000
    group.streams.min.session.timeout.ms = 45000
    group.streams.num.standby.replicas = 0
    group.streams.session.timeout.ms = 45000
    initial.broker.registration.timeout.ms = 60000
    inter.broker.listener.name = BROKER
    internal.metadata.delete.delay.millis = 60000
    internal.metadata.log.segment.bytes = null
    internal.metadata.max.batch.size.in.bytes = 8388608
    internal.metadata.max.fetch.size.in.bytes = 8388608
    kafka.metrics.polling.interval.secs = 10
    kafka.metrics.reporters = []
    leader.imbalance.check.interval.seconds = 300
    listener.security.protocol.map = BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT
    listeners = BROKER://localhost:6001,CONTROLLER://localhost:6002
    log.cleaner.backoff.ms = 15000
    log.cleaner.dedupe.buffer.size = 1048577
    log.cleaner.delete.retention.ms = 86400000
    log.cleaner.enable = true
    log.cleaner.io.buffer.load.factor = 0.9
    log.cleaner.io.buffer.size = 524288
    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
    log.cleaner.max.compaction.lag.ms = 9223372036854775807
    log.cleaner.min.cleanable.ratio = 0.5
    log.cleaner.min.compaction.lag.ms = 0
    log.cleaner.threads = 1
    log.cleanup.policy = [delete]
    log.dir = /tmp/kafka-logs
    log.dir.failure.timeout.ms = 30000
    log.dirs = /tmp/kafka-logs15769196062054598040
    log.flush.interval.messages = 1
    log.flush.interval.ms = null
    log.flush.offset.checkpoint.interval.ms = 60000
    log.flush.scheduler.interval.ms = 9223372036854775807
    log.flush.start.offset.checkpoint.interval.ms = 60000
    log.index.interval.bytes = 4096
    log.index.size.max.bytes = 10485760
    log.initial.task.delay.ms = 30000
    log.local.retention.bytes = -2
    log.local.retention.ms = -2
    log.message.timestamp.after.max.ms = 3600000
    log.message.timestamp.before.max.ms = 9223372036854775807
    log.message.timestamp.type = CreateTime
    log.preallocate = false
    log.retention.bytes = -1
    log.retention.check.interval.ms = 300000
    log.retention.hours = 168
    log.retention.minutes = null
    log.retention.ms = null
    log.roll.hours = 168
    log.roll.jitter.hours = 0
    log.roll.jitter.ms = null
    log.roll.ms = null
    log.segment.bytes = 1073741824
    log.segment.delete.delay.ms = 60000
    max.connection.creation.rate = 2147483647
    max.connections = 2147483647
    max.connections.per.ip = 2147483647
    max.connections.per.ip.overrides =
    max.incremental.fetch.session.cache.slots = 1000
    max.request.partition.size.limit = 2000
    message.max.bytes = 1048588
    metadata.log.dir = null
    metadata.log.max.record.bytes.between.snapshots = 20971520
    metadata.log.max.snapshot.interval.ms = 3600000
    metadata.log.segment.bytes = 1073741824
    metadata.log.segment.ms = 604800000
    metadata.max.idle.interval.ms = 500
    metadata.max.retention.bytes = 104857600
    metadata.max.retention.ms = 604800000
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    min.insync.replicas = 1
    node.id = 0
    num.io.threads = 8
    num.network.threads = 3
    num.partitions = 1
    num.recovery.threads.per.data.dir = 2
    num.replica.alter.log.dirs.threads = null
    num.replica.fetchers = 1
    offset.metadata.max.bytes = 4096
    offsets.commit.timeout.ms = 5000
    offsets.load.buffer.size = 5242880
    offsets.retention.check.interval.ms = 600000
    offsets.retention.minutes = 10080
    offsets.topic.compression.codec = 0
    offsets.topic.num.partitions = 1
    offsets.topic.replication.factor = 1
    offsets.topic.segment.bytes = 104857600
    principal.builder.class = class org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
    process.roles = [broker, controller]
    producer.id.expiration.check.interval.ms = 600000
    producer.id.expiration.ms = 86400000
    producer.purgatory.purge.interval.requests = 1000
    queued.max.request.bytes = -1
    queued.max.requests = 500
    quota.window.num = 11
    quota.window.size.seconds = 1
    remote.fetch.max.wait.ms = 500
    remote.list.offsets.request.timeout.ms = 30000
    remote.log.index.file.cache.total.size.bytes = 1073741824
    remote.log.manager.copier.thread.pool.size = 10
    remote.log.manager.copy.max.bytes.per.second = 9223372036854775807
    remote.log.manager.copy.quota.window.num = 11
    remote.log.manager.copy.quota.window.size.seconds = 1
    remote.log.manager.expiration.thread.pool.size = 10
    remote.log.manager.fetch.max.bytes.per.second = 9223372036854775807
    remote.log.manager.fetch.quota.window.num = 11
    remote.log.manager.fetch.quota.window.size.seconds = 1
    remote.log.manager.task.interval.ms = 30000
    remote.log.manager.task.retry.backoff.max.ms = 30000
    remote.log.manager.task.retry.backoff.ms = 500
    remote.log.manager.task.retry.jitter = 0.2
    remote.log.manager.thread.pool.size = 2
    remote.log.metadata.custom.metadata.max.bytes = 128
    remote.log.metadata.manager.class.name = org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager
    remote.log.metadata.manager.class.path = null
    remote.log.metadata.manager.impl.prefix = rlmm.config.
    remote.log.metadata.manager.listener.name = null
    remote.log.reader.max.pending.tasks = 100
    remote.log.reader.threads = 10
    remote.log.storage.manager.class.name = null
    remote.log.storage.manager.class.path = null
    remote.log.storage.manager.impl.prefix = rsm.config.
    remote.log.storage.system.enable = false
    replica.fetch.backoff.ms = 1000
    replica.fetch.max.bytes = 1048576
    replica.fetch.min.bytes = 1
    replica.fetch.response.max.bytes = 10485760
    replica.fetch.wait.max.ms = 500
    replica.high.watermark.checkpoint.interval.ms = 5000
    replica.lag.time.max.ms = 30000
    replica.selector.class = null
    replica.socket.receive.buffer.bytes = 65536
    replica.socket.timeout.ms = 30000
    replication.quota.window.num = 11
    replication.quota.window.size.seconds = 1
    request.timeout.ms = 30000
    sasl.client.callback.handler.class = null
    sasl.enabled.mechanisms = [GSSAPI]
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.principal.to.local.rules = [DEFAULT]
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism.controller.protocol = GSSAPI
    sasl.mechanism.inter.broker.protocol = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    sasl.server.callback.handler.class = null
    sasl.server.max.receive.size = 524288
    security.inter.broker.protocol = PLAINTEXT
    security.providers = null
    server.max.startup.time.ms = 9223372036854775807
    share.coordinator.append.linger.ms = 5
    share.coordinator.cold.partition.snapshot.interval.ms = 300000
    share.coordinator.load.buffer.size = 5242880
    share.coordinator.snapshot.update.records.per.snapshot = 500
    share.coordinator.state.topic.compression.codec = 0
    share.coordinator.state.topic.min.isr = 2
    share.coordinator.state.topic.num.partitions = 50
    share.coordinator.state.topic.prune.interval.ms = 300000
    share.coordinator.state.topic.replication.factor = 3
    share.coordinator.state.topic.segment.bytes = 104857600
    share.coordinator.threads = 1
    share.coordinator.write.timeout.ms = 5000
    share.fetch.purgatory.purge.interval.requests = 1000
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    socket.listen.backlog.size = 50
    socket.receive.buffer.bytes = 102400
    socket.request.max.bytes = 104857600
    socket.send.buffer.bytes = 102400
    ssl.allow.dn.changes = false
    ssl.allow.san.changes = false
    ssl.cipher.suites = []
    ssl.client.auth = none
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.principal.mapping.rules = DEFAULT
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    telemetry.max.bytes = 1048576
    transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
    transaction.max.timeout.ms = 900000
    transaction.partition.verification.enable = true
    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
    transaction.state.log.load.buffer.size = 5242880
    transaction.state.log.min.isr = 1
    transaction.state.log.num.partitions = 50
    transaction.state.log.replication.factor = 1
    transaction.state.log.segment.bytes = 104857600
    transaction.two.phase.commit.enable = false
    transactional.id.expiration.ms = 604800000
    unclean.leader.election.enable = false
    unclean.leader.election.interval.ms = 300000
    unstable.api.versions.enable = false
    unstable.feature.versions.enable = false

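Aside: most of the dump above is defaults. The handful of settings that make this an embedded single-node KRaft node (one process acting as both broker and controller, PLAINTEXT listeners on ports 6001 and 6002) can be collected as below. A minimal sketch in Scala, with values taken verbatim from the log; the helper name singleNodeKraftProps is illustrative, not from the ox sources.

import java.util.Properties

// Sketch only: the non-default settings from the KafkaConfig dump above that
// define the embedded single-node combined broker + controller.
def singleNodeKraftProps(): Properties =
  val p = new Properties()
  p.put("process.roles", "broker,controller")
  p.put("node.id", "0")
  p.put("controller.quorum.voters", "0@localhost:6002")
  p.put("controller.listener.names", "CONTROLLER")
  p.put("listeners", "BROKER://localhost:6001,CONTROLLER://localhost:6002")
  p.put("advertised.listeners", "BROKER://localhost:6001")
  p.put("inter.broker.listener.name", "BROKER")
  p.put("listener.security.protocol.map", "BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT")
  p.put("log.dirs", "/tmp/kafka-logs15769196062054598040")
  p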
01:32:21.304 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the broker to be unfenced
01:32:21.305 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] The request from broker 0 to unfence has been granted because it has caught up with the offset of its register broker record 5.
01:32:21.312 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=-1, inControlledShutdown=0, logDirs=[])
01:32:21.336 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker has been unfenced. Transitioning from RECOVERY to RUNNING.
01:32:21.336 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the broker to be unfenced
01:32:21.336 [pool-67-thread-9] INFO o.a.k.s.n.EndpointReadyFutures - authorizerStart completed for endpoint BROKER. Endpoint is now READY.
01:32:21.336 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Enabling request processing.
01:32:21.337 [pool-67-thread-9] INFO k.n.DataPlaneAcceptor - Awaiting socket connections on localhost:6001.
01:32:21.338 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for all of the authorizer futures to be completed
01:32:21.338 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for all of the authorizer futures to be completed
01:32:21.338 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for all of the SocketServer Acceptors to be started
01:32:21.338 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for all of the SocketServer Acceptors to be started
01:32:21.338 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from STARTING to STARTED
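The producer whose configuration is dumped below can be reproduced against this broker (BROKER listener on localhost:6001, auto.create.topics.enable = true) with the plain Apache Kafka client. A minimal sketch, not code from the ox sources; the function name and message value are illustrative, while the topic t1 and the settings come from the log:

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

@main def producerSketch(): Unit =
  val props = new Properties()
  // Settings mirrored from the ProducerConfig dump below.
  props.put("bootstrap.servers", "localhost:6001")
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("max.block.ms", "10000")
  val producer = new KafkaProducer[String, String](props)
  try
    // t1 is auto-created on first use (auto.create.topics.enable = true above),
    // which is why the first metadata fetch later logs UNKNOWN_TOPIC_OR_PARTITION.
    producer.send(new ProducerRecord[String, String]("t1", "msg1")).get()
  finally producer.close()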
01:32:21.362 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-1
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:32:21.392 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:21.403 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-1] Instantiated an idempotent producer.
01:32:21.423 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:21.423 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:21.423 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832341423
01:32:21.443 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t1) to the active controller.
01:32:21.449 [kafka-producer-network-thread | producer-1] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-1] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t1=UNKNOWN_TOPIC_OR_PARTITION}
01:32:21.450 [kafka-producer-network-thread | producer-1] INFO o.a.k.c.Metadata - [Producer clientId=producer-1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:21.464 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
01:32:21.464 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t1 with topic ID kA9eT6JuQ2KYgHsBQ_CgMQ.
01:32:21.467 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t1-0 with topic ID kA9eT6JuQ2KYgHsBQ_CgMQ and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
01:32:21.473 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
01:32:21.475 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t1-0)
01:32:21.477 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t1-0 with topic id kA9eT6JuQ2KYgHsBQ_CgMQ.
01:32:21.480 [quorum-controller-0-event-handler] INFO o.a.k.c.ProducerIdControlManager - [QuorumController id=0] Replaying ProducerIdsRecord ProducerIdsRecord(brokerId=0, brokerEpoch=5, nextProducerId=1000)
01:32:21.491 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t1-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
01:32:21.493 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t1-0 in /tmp/kafka-logs15769196062054598040/t1-0 with properties {}
01:32:21.494 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t1-0 broker=0] No checkpointed highwatermark is found for partition t1-0
01:32:21.496 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t1-0 broker=0] Log loaded for partition t1-0 with initial high watermark 0
01:32:21.502 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t1-0 with topic id Some(kA9eT6JuQ2KYgHsBQ_CgMQ) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
01:32:22.456 [kafka-producer-network-thread | producer-1] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-1] ProducerId set to 0 with epoch 0
01:32:22.490 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-1] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:32:22.496 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:22.496 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:22.496 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:22.496 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:22.497 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-1 unregistered
01:32:22.498 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-2
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:32:22.498 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:22.499 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-2] Instantiated an idempotent producer.
01:32:22.503 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:22.503 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:22.503 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832342503
01:32:22.511 [kafka-producer-network-thread | producer-2] INFO o.a.k.c.Metadata - [Producer clientId=producer-2] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:22.512 [kafka-producer-network-thread | producer-2] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-2] ProducerId set to 1 with epoch 0
01:32:22.525 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-2] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:32:22.529 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:22.529 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:22.529 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:22.529 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:22.530 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-2 unregistered
01:32:22.530 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-3
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

378301:32:22.532 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
378401:32:22.533 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-3] Instantiated an idempotent producer.
378501:32:22.540 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
378601:32:22.540 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
378701:32:22.541 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832342540
378801:32:22.545 [kafka-producer-network-thread | producer-3] INFO o.a.k.c.Metadata - [Producer clientId=producer-3] Cluster ID: 4oa31apqQtabsfPXH-H0RA
378901:32:22.547 [kafka-producer-network-thread | producer-3] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-3] ProducerId set to 2 with epoch 0
379001:32:22.559 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-3] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
379101:32:22.563 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
379201:32:22.563 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
379301:32:22.563 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
379401:32:22.563 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
379501:32:22.563 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-3 unregistered
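The block above is producer-3's full lifecycle as echoed by the client: the effective ProducerConfig (enable.idempotence=true is what forces acks=-1 and retries=2147483647 in the dump), idempotent-producer setup, then close and JMX unregistration. A minimal sketch of an equivalent plain-client setup against the embedded broker from the logs (localhost:6001); this is not the test's actual code, and the topic name and payload are placeholders:

  import java.util.Properties
  import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
  import org.apache.kafka.common.serialization.StringSerializer

  @main def producerSketch(): Unit =
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") // logs "Instantiated an idempotent producer."
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    val producer = new KafkaProducer[String, String](props)
    try producer.send(new ProducerRecord("t1", "key", "value")).get() // block until acked
    finally producer.close() // emits the "Closing the Kafka producer" / unregister lines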
379601:32:22.586 [virtual-594] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
3797 allow.auto.create.topics = true
3798 auto.commit.interval.ms = 5000
3799 auto.offset.reset = earliest
3800 bootstrap.servers = [localhost:6001]
3801 check.crcs = true
3802 client.dns.lookup = use_all_dns_ips
3803 client.id = consumer-g1-1
3804 client.rack =
3805 connections.max.idle.ms = 540000
3806 default.api.timeout.ms = 60000
3807 enable.auto.commit = false
3808 enable.metrics.push = true
3809 exclude.internal.topics = true
3810 fetch.max.bytes = 52428800
3811 fetch.max.wait.ms = 500
3812 fetch.min.bytes = 1
3813 group.id = g1
3814 group.instance.id = null
3815 group.protocol = classic
3816 group.remote.assignor = null
3817 heartbeat.interval.ms = 3000
3818 interceptor.classes = []
3819 internal.leave.group.on.close = true
3820 internal.throw.on.fetch.stable.offset.unsupported = false
3821 isolation.level = read_uncommitted
3822 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
3823 max.partition.fetch.bytes = 1048576
3824 max.poll.interval.ms = 300000
3825 max.poll.records = 500
3826 metadata.max.age.ms = 300000
3827 metadata.recovery.rebootstrap.trigger.ms = 300000
3828 metadata.recovery.strategy = rebootstrap
3829 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3830 metrics.num.samples = 2
3831 metrics.recording.level = INFO
3832 metrics.sample.window.ms = 30000
3833 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
3834 receive.buffer.bytes = 65536
3835 reconnect.backoff.max.ms = 1000
3836 reconnect.backoff.ms = 50
3837 request.timeout.ms = 30000
3838 retry.backoff.max.ms = 1000
3839 retry.backoff.ms = 100
3840 sasl.client.callback.handler.class = null
3841 sasl.jaas.config = null
3842 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3843 sasl.kerberos.min.time.before.relogin = 60000
3844 sasl.kerberos.service.name = null
3845 sasl.kerberos.ticket.renew.jitter = 0.05
3846 sasl.kerberos.ticket.renew.window.factor = 0.8
3847 sasl.login.callback.handler.class = null
3848 sasl.login.class = null
3849 sasl.login.connect.timeout.ms = null
3850 sasl.login.read.timeout.ms = null
3851 sasl.login.refresh.buffer.seconds = 300
3852 sasl.login.refresh.min.period.seconds = 60
3853 sasl.login.refresh.window.factor = 0.8
3854 sasl.login.refresh.window.jitter = 0.05
3855 sasl.login.retry.backoff.max.ms = 10000
3856 sasl.login.retry.backoff.ms = 100
3857 sasl.mechanism = GSSAPI
3858 sasl.oauthbearer.assertion.algorithm = RS256
3859 sasl.oauthbearer.assertion.claim.aud = null
3860 sasl.oauthbearer.assertion.claim.exp.seconds = 300
3861 sasl.oauthbearer.assertion.claim.iss = null
3862 sasl.oauthbearer.assertion.claim.jti.include = false
3863 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
3864 sasl.oauthbearer.assertion.claim.sub = null
3865 sasl.oauthbearer.assertion.file = null
3866 sasl.oauthbearer.assertion.private.key.file = null
3867 sasl.oauthbearer.assertion.private.key.passphrase = null
3868 sasl.oauthbearer.assertion.template.file = null
3869 sasl.oauthbearer.client.credentials.client.id = null
3870 sasl.oauthbearer.client.credentials.client.secret = null
3871 sasl.oauthbearer.clock.skew.seconds = 30
3872 sasl.oauthbearer.expected.audience = null
3873 sasl.oauthbearer.expected.issuer = null
3874 sasl.oauthbearer.header.urlencode = false
3875 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
3876 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
3877 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
3878 sasl.oauthbearer.jwks.endpoint.url = null
3879 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
3880 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
3881 sasl.oauthbearer.scope = null
3882 sasl.oauthbearer.scope.claim.name = scope
3883 sasl.oauthbearer.sub.claim.name = sub
3884 sasl.oauthbearer.token.endpoint.url = null
3885 security.protocol = PLAINTEXT
3886 security.providers = null
3887 send.buffer.bytes = 131072
3888 session.timeout.ms = 45000
3889 share.acknowledgement.mode = implicit
3890 socket.connection.setup.timeout.max.ms = 30000
3891 socket.connection.setup.timeout.ms = 10000
3892 ssl.cipher.suites = null
3893 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
3894 ssl.endpoint.identification.algorithm = https
3895 ssl.engine.factory.class = null
3896 ssl.key.password = null
3897 ssl.keymanager.algorithm = SunX509
3898 ssl.keystore.certificate.chain = null
3899 ssl.keystore.key = null
3900 ssl.keystore.location = null
3901 ssl.keystore.password = null
3902 ssl.keystore.type = JKS
3903 ssl.protocol = TLSv1.3
3904 ssl.provider = null
3905 ssl.secure.random.implementation = null
3906 ssl.trustmanager.algorithm = PKIX
3907 ssl.truststore.certificates = null
3908 ssl.truststore.location = null
3909 ssl.truststore.password = null
3910 ssl.truststore.type = JKS
3911 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
3912
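The ConsumerConfig dump above describes a manually committed, earliest-reset consumer in group g1. A minimal sketch of an equivalent setup with the plain Java client (not the code ox-kafka uses internally; the topic comes from the logs, the loop body is a placeholder):

  import java.time.Duration
  import java.util.Properties
  import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
  import org.apache.kafka.common.serialization.StringDeserializer
  import scala.jdk.CollectionConverters.*

  @main def consumerSketch(): Unit =
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "g1")
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // no committed offset: start at 0
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")   // commit manually below
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    val consumer = new KafkaConsumer[String, String](props)
    try
      consumer.subscribe(java.util.List.of("t1")) // logs "Subscribed to topic(s): t1"
      consumer.poll(Duration.ofSeconds(1)).asScala.foreach(r => println(s"${r.key} -> ${r.value}"))
      consumer.commitSync() // required here, since enable.auto.commit = false
    finally consumer.close()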
391301:32:22.597 [virtual-594] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
391401:32:22.634 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
391501:32:22.634 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
391601:32:22.634 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832342634
391701:32:22.655 [virtual-601] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g1-1, groupId=g1] Subscribed to topic(s): t1
391801:32:22.662 [virtual-601] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g1-1, groupId=g1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
391901:32:22.663 [data-plane-kafka-request-handler-7] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(__consumer_offsets) to the active controller.
392001:32:22.667 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='__consumer_offsets', numPartitions=1, replicationFactor=1, assignments=[], configs=[CreatableTopicConfig(name='compression.type', value='producer'), CreatableTopicConfig(name='cleanup.policy', value='compact'), CreatableTopicConfig(name='segment.bytes', value='104857600')]): SUCCESS
392101:32:22.667 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic __consumer_offsets with topic ID 2gGNgE87Sc-nMQZzezX5KA.
392201:32:22.668 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration compression.type to producer
392301:32:22.668 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration cleanup.policy to compact
392401:32:22.668 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration segment.bytes to 104857600
392501:32:22.668 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition __consumer_offsets-0 with topic ID 2gGNgE87Sc-nMQZzezX5KA and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
392601:32:22.695 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
392701:32:22.696 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(__consumer_offsets-0)
392801:32:22.696 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition __consumer_offsets-0 with topic id 2gGNgE87Sc-nMQZzezX5KA.
392901:32:22.698 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__consumer_offsets-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
393001:32:22.699 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition __consumer_offsets-0 in /tmp/kafka-logs15769196062054598040/__consumer_offsets-0 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600}
393101:32:22.699 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition __consumer_offsets-0 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-0
393201:32:22.699 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition __consumer_offsets-0 broker=0] Log loaded for partition __consumer_offsets-0 with initial high watermark 0
393301:32:22.699 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader __consumer_offsets-0 with topic id Some(2gGNgE87Sc-nMQZzezX5KA) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
393401:32:22.701 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Scheduling loading of metadata from __consumer_offsets-0 with epoch 0
393501:32:22.708 [kafka-0-metadata-loader-event-handler] INFO k.s.m.DynamicConfigPublisher - [DynamicConfigPublisher broker id=0] Updating topic __consumer_offsets with new configuration : compression.type -> producer,cleanup.policy -> compact,segment.bytes -> 104857600
393601:32:22.722 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Finished loading of metadata from __consumer_offsets-0 with epoch 0 in 1ms where 1ms was spent in the scheduler. Loaded 0 records which total to 0 bytes.
393701:32:22.765 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
393801:32:22.767 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] (Re-)joining group
393901:32:22.777 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g1 in Empty state. Created a new member id consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569 and requesting the member to rejoin with this id.
394001:32:22.779 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Request joining group due to: need to re-join with the given member-id: consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569
394101:32:22.780 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] (Re-)joining group
394201:32:22.784 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569 joins group g1 in Empty state. Adding to the group now.
394301:32:22.785 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569 with group instance id null; client reason: need to re-join with the given member-id: consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569).
394401:32:25.788 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g1 generation 1 with 1 members.
394501:32:25.790 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569', protocol='range'}
394601:32:25.795 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Finished assignment for group at generation 1: {consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569=Assignment(partitions=[t1-0])}
394701:32:25.799 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569 for group g1 for generation 1. The group has 1 members, 0 of which are static.
394801:32:25.807 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569', protocol='range'}
394901:32:25.807 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Notifying assignor about the new Assignment(partitions=[t1-0])
395001:32:25.809 [virtual-601] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g1-1, groupId=g1] Adding newly assigned partitions: [t1-0]
395101:32:25.816 [virtual-601] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Found no committed offset for partition t1-0
395201:32:25.828 [virtual-601] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g1-1, groupId=g1] Resetting offset for partition t1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
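The sequence above traces one full classic-protocol group join: coordinator discovery, the member-id handshake and re-join, a rebalance, range assignment of t1-0, group sync, and finally an earliest offset reset because the group has no committed offset. The assignment and revocation notifications logged by ConsumerRebalanceListenerInvoker are surfaced to application code via ConsumerRebalanceListener; a sketch, assuming a consumer configured as in the dump above:

  import java.util.Collection
  import org.apache.kafka.clients.consumer.{ConsumerRebalanceListener, KafkaConsumer}
  import org.apache.kafka.common.TopicPartition

  def subscribeWithListener(consumer: KafkaConsumer[String, String]): Unit =
    consumer.subscribe(
      java.util.List.of("t1"),
      new ConsumerRebalanceListener {
        def onPartitionsAssigned(parts: Collection[TopicPartition]): Unit =
          println(s"assigned: $parts") // pairs with "Adding newly assigned partitions"
        def onPartitionsRevoked(parts: Collection[TopicPartition]): Unit =
          println(s"revoked: $parts")  // pairs with "Revoke previously assigned partitions"
      }
    )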
395301:32:26.112 [virtual-594] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
3954 acks = -1
3955 batch.size = 16384
3956 bootstrap.servers = [localhost:6001]
3957 buffer.memory = 33554432
3958 client.dns.lookup = use_all_dns_ips
3959 client.id = producer-4
3960 compression.gzip.level = -1
3961 compression.lz4.level = 9
3962 compression.type = none
3963 compression.zstd.level = 3
3964 connections.max.idle.ms = 540000
3965 delivery.timeout.ms = 120000
3966 enable.idempotence = true
3967 enable.metrics.push = true
3968 interceptor.classes = []
3969 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
3970 linger.ms = 5
3971 max.block.ms = 10000
3972 max.in.flight.requests.per.connection = 5
3973 max.request.size = 1048576
3974 metadata.max.age.ms = 300000
3975 metadata.max.idle.ms = 300000
3976 metadata.recovery.rebootstrap.trigger.ms = 300000
3977 metadata.recovery.strategy = rebootstrap
3978 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3979 metrics.num.samples = 2
3980 metrics.recording.level = INFO
3981 metrics.sample.window.ms = 30000
3982 partitioner.adaptive.partitioning.enable = true
3983 partitioner.availability.timeout.ms = 0
3984 partitioner.class = null
3985 partitioner.ignore.keys = false
3986 receive.buffer.bytes = 32768
3987 reconnect.backoff.max.ms = 1000
3988 reconnect.backoff.ms = 50
3989 request.timeout.ms = 30000
3990 retries = 2147483647
3991 retry.backoff.max.ms = 1000
3992 retry.backoff.ms = 1000
3993 sasl.client.callback.handler.class = null
3994 sasl.jaas.config = null
3995 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3996 sasl.kerberos.min.time.before.relogin = 60000
3997 sasl.kerberos.service.name = null
3998 sasl.kerberos.ticket.renew.jitter = 0.05
3999 sasl.kerberos.ticket.renew.window.factor = 0.8
4000 sasl.login.callback.handler.class = null
4001 sasl.login.class = null
4002 sasl.login.connect.timeout.ms = null
4003 sasl.login.read.timeout.ms = null
4004 sasl.login.refresh.buffer.seconds = 300
4005 sasl.login.refresh.min.period.seconds = 60
4006 sasl.login.refresh.window.factor = 0.8
4007 sasl.login.refresh.window.jitter = 0.05
4008 sasl.login.retry.backoff.max.ms = 10000
4009 sasl.login.retry.backoff.ms = 100
4010 sasl.mechanism = GSSAPI
4011 sasl.oauthbearer.assertion.algorithm = RS256
4012 sasl.oauthbearer.assertion.claim.aud = null
4013 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4014 sasl.oauthbearer.assertion.claim.iss = null
4015 sasl.oauthbearer.assertion.claim.jti.include = false
4016 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4017 sasl.oauthbearer.assertion.claim.sub = null
4018 sasl.oauthbearer.assertion.file = null
4019 sasl.oauthbearer.assertion.private.key.file = null
4020 sasl.oauthbearer.assertion.private.key.passphrase = null
4021 sasl.oauthbearer.assertion.template.file = null
4022 sasl.oauthbearer.client.credentials.client.id = null
4023 sasl.oauthbearer.client.credentials.client.secret = null
4024 sasl.oauthbearer.clock.skew.seconds = 30
4025 sasl.oauthbearer.expected.audience = null
4026 sasl.oauthbearer.expected.issuer = null
4027 sasl.oauthbearer.header.urlencode = false
4028 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4029 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4030 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4031 sasl.oauthbearer.jwks.endpoint.url = null
4032 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4033 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4034 sasl.oauthbearer.scope = null
4035 sasl.oauthbearer.scope.claim.name = scope
4036 sasl.oauthbearer.sub.claim.name = sub
4037 sasl.oauthbearer.token.endpoint.url = null
4038 security.protocol = PLAINTEXT
4039 security.providers = null
4040 send.buffer.bytes = 131072
4041 socket.connection.setup.timeout.max.ms = 30000
4042 socket.connection.setup.timeout.ms = 10000
4043 ssl.cipher.suites = null
4044 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4045 ssl.endpoint.identification.algorithm = https
4046 ssl.engine.factory.class = null
4047 ssl.key.password = null
4048 ssl.keymanager.algorithm = SunX509
4049 ssl.keystore.certificate.chain = null
4050 ssl.keystore.key = null
4051 ssl.keystore.location = null
4052 ssl.keystore.password = null
4053 ssl.keystore.type = JKS
4054 ssl.protocol = TLSv1.3
4055 ssl.provider = null
4056 ssl.secure.random.implementation = null
4057 ssl.trustmanager.algorithm = PKIX
4058 ssl.truststore.certificates = null
4059 ssl.truststore.location = null
4060 ssl.truststore.password = null
4061 ssl.truststore.type = JKS
4062 transaction.timeout.ms = 60000
4063 transaction.two.phase.commit.enable = false
4064 transactional.id = null
4065 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4066
406701:32:26.112 [virtual-594] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
406801:32:26.113 [virtual-594] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-4] Instantiated an idempotent producer.
406901:32:26.116 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
407001:32:26.116 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
407101:32:26.116 [virtual-594] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832346116
407201:32:26.121 [kafka-producer-network-thread | producer-4] INFO o.a.k.c.Metadata - [Producer clientId=producer-4] Cluster ID: 4oa31apqQtabsfPXH-H0RA
407301:32:26.121 [kafka-producer-network-thread | producer-4] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-4] ProducerId set to 3 with epoch 0
407401:32:26.132 [virtual-594] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-4] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
407501:32:26.135 [virtual-594] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
407601:32:26.136 [virtual-594] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
407701:32:26.136 [virtual-594] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
407801:32:26.136 [virtual-594] INFO o.a.k.c.m.Metrics - Metrics reporters closed
407901:32:26.136 [virtual-594] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-4 unregistered
408001:32:26.139 [virtual-600] ERROR o.k.KafkaFlow$ - Exception when polling for records
4081java.lang.InterruptedException: null
4082 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
4083 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
4084 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
4085 at ox.channels.ActorRef.ask(actor.scala:64)
4086 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
4087 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
4088 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
4089 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
4090 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
4091 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
4092 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
4093 at scala.Function0.apply$mcV$sp(Function0.scala:45)
4094 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
4095 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
409601:32:26.139 [virtual-601] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
4097java.lang.InterruptedException: null
4098 ... 18 common frames omitted
4099Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
4100 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
4101 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
4102 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
4103 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
4104 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
4105 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
4106 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
4107 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
4108 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
4109 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
4110 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
4111 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
4112 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
4113 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
4114 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
4115 at scala.Function0.apply$mcV$sp(Function0.scala:45)
4116 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
4117 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
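The two errors above are consistent with scope teardown rather than a broker problem: per the traces, one fork is blocked in ox.channels.ActorRef.ask on a CompletableFuture.get, the other in KafkaConsumer.poll inside the consumer actor. When the enclosing scope ends, both virtual threads are interrupted, and the Kafka client rethrows the interrupt wrapped in org.apache.kafka.common.errors.InterruptException. A minimal sketch of the underlying JDK mechanism (not ox's actual shutdown code; all names are placeholders):

  import java.util.concurrent.CompletableFuture

  @main def interruptSketch(): Unit =
    val pending = new CompletableFuture[Unit]() // stands in for the ask() reply future
    val fork = Thread.ofVirtual().start { () =>
      try pending.get() // blocks, like the poll fork above
      catch case _: InterruptedException => println("fork interrupted on scope shutdown")
    }
    Thread.sleep(100)
    fork.interrupt() // what tearing down the scope does to each blocked fork
    fork.join()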
411801:32:26.153 [virtual-607] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g1-1, groupId=g1] Revoke previously assigned partitions [t1-0]
411901:32:26.154 [virtual-607] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Member consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
412001:32:26.155 [virtual-607] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Resetting generation and member id due to: consumer pro-actively leaving the group
412101:32:26.155 [virtual-607] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Request joining group due to: consumer pro-actively leaving the group
412201:32:26.156 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g1] Member consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
412301:32:26.156 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g1-1-89c4f362-407c-4f14-bc75-2e60d2f30569) members.).
412401:32:26.157 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g1 with generation 2 is now empty.
412501:32:26.642 [virtual-607] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
412601:32:26.642 [virtual-607] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
412701:32:26.642 [virtual-607] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
412801:32:26.642 [virtual-607] INFO o.a.k.c.m.Metrics - Metrics reporters closed
412901:32:26.645 [virtual-607] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g1-1 unregistered
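The revoke / LeaveGroup / metrics-close sequence above is the normal effect of KafkaConsumer.close. For code that must stop a blocked poll without thread interruption (which produced the InterruptException above), the client's supported alternative is wakeup; a sketch, assuming a consumer of the type seen in the logs:

  import java.time.Duration
  import org.apache.kafka.clients.consumer.KafkaConsumer
  import org.apache.kafka.common.errors.WakeupException

  def pollUntilWoken(consumer: KafkaConsumer[String, String]): Unit =
    try
      while true do
        consumer.poll(Duration.ofMillis(500)).forEach(r => println(r.value()))
    catch case _: WakeupException => () // thrown inside poll after consumer.wakeup() from another thread
    finally consumer.close(Duration.ofSeconds(5)) // sends LeaveGroup, as in the lines above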
413001:32:26.656 [virtual-609] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4131 acks = -1
4132 batch.size = 16384
4133 bootstrap.servers = [localhost:6001]
4134 buffer.memory = 33554432
4135 client.dns.lookup = use_all_dns_ips
4136 client.id = producer-5
4137 compression.gzip.level = -1
4138 compression.lz4.level = 9
4139 compression.type = none
4140 compression.zstd.level = 3
4141 connections.max.idle.ms = 540000
4142 delivery.timeout.ms = 120000
4143 enable.idempotence = true
4144 enable.metrics.push = true
4145 interceptor.classes = []
4146 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4147 linger.ms = 5
4148 max.block.ms = 60000
4149 max.in.flight.requests.per.connection = 5
4150 max.request.size = 1048576
4151 metadata.max.age.ms = 300000
4152 metadata.max.idle.ms = 300000
4153 metadata.recovery.rebootstrap.trigger.ms = 300000
4154 metadata.recovery.strategy = rebootstrap
4155 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4156 metrics.num.samples = 2
4157 metrics.recording.level = INFO
4158 metrics.sample.window.ms = 30000
4159 partitioner.adaptive.partitioning.enable = true
4160 partitioner.availability.timeout.ms = 0
4161 partitioner.class = null
4162 partitioner.ignore.keys = false
4163 receive.buffer.bytes = 32768
4164 reconnect.backoff.max.ms = 1000
4165 reconnect.backoff.ms = 50
4166 request.timeout.ms = 30000
4167 retries = 2147483647
4168 retry.backoff.max.ms = 1000
4169 retry.backoff.ms = 100
4170 sasl.client.callback.handler.class = null
4171 sasl.jaas.config = null
4172 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4173 sasl.kerberos.min.time.before.relogin = 60000
4174 sasl.kerberos.service.name = null
4175 sasl.kerberos.ticket.renew.jitter = 0.05
4176 sasl.kerberos.ticket.renew.window.factor = 0.8
4177 sasl.login.callback.handler.class = null
4178 sasl.login.class = null
4179 sasl.login.connect.timeout.ms = null
4180 sasl.login.read.timeout.ms = null
4181 sasl.login.refresh.buffer.seconds = 300
4182 sasl.login.refresh.min.period.seconds = 60
4183 sasl.login.refresh.window.factor = 0.8
4184 sasl.login.refresh.window.jitter = 0.05
4185 sasl.login.retry.backoff.max.ms = 10000
4186 sasl.login.retry.backoff.ms = 100
4187 sasl.mechanism = GSSAPI
4188 sasl.oauthbearer.assertion.algorithm = RS256
4189 sasl.oauthbearer.assertion.claim.aud = null
4190 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4191 sasl.oauthbearer.assertion.claim.iss = null
4192 sasl.oauthbearer.assertion.claim.jti.include = false
4193 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4194 sasl.oauthbearer.assertion.claim.sub = null
4195 sasl.oauthbearer.assertion.file = null
4196 sasl.oauthbearer.assertion.private.key.file = null
4197 sasl.oauthbearer.assertion.private.key.passphrase = null
4198 sasl.oauthbearer.assertion.template.file = null
4199 sasl.oauthbearer.client.credentials.client.id = null
4200 sasl.oauthbearer.client.credentials.client.secret = null
4201 sasl.oauthbearer.clock.skew.seconds = 30
4202 sasl.oauthbearer.expected.audience = null
4203 sasl.oauthbearer.expected.issuer = null
4204 sasl.oauthbearer.header.urlencode = false
4205 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4206 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4207 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4208 sasl.oauthbearer.jwks.endpoint.url = null
4209 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4210 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4211 sasl.oauthbearer.scope = null
4212 sasl.oauthbearer.scope.claim.name = scope
4213 sasl.oauthbearer.sub.claim.name = sub
4214 sasl.oauthbearer.token.endpoint.url = null
4215 security.protocol = PLAINTEXT
4216 security.providers = null
4217 send.buffer.bytes = 131072
4218 socket.connection.setup.timeout.max.ms = 30000
4219 socket.connection.setup.timeout.ms = 10000
4220 ssl.cipher.suites = null
4221 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4222 ssl.endpoint.identification.algorithm = https
4223 ssl.engine.factory.class = null
4224 ssl.key.password = null
4225 ssl.keymanager.algorithm = SunX509
4226 ssl.keystore.certificate.chain = null
4227 ssl.keystore.key = null
4228 ssl.keystore.location = null
4229 ssl.keystore.password = null
4230 ssl.keystore.type = JKS
4231 ssl.protocol = TLSv1.3
4232 ssl.provider = null
4233 ssl.secure.random.implementation = null
4234 ssl.trustmanager.algorithm = PKIX
4235 ssl.truststore.certificates = null
4236 ssl.truststore.location = null
4237 ssl.truststore.password = null
4238 ssl.truststore.type = JKS
4239 transaction.timeout.ms = 60000
4240 transaction.two.phase.commit.enable = false
4241 transactional.id = null
4242 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4243
424401:32:26.657 [virtual-609] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
424501:32:26.657 [virtual-609] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-5] Instantiated an idempotent producer.
424601:32:26.660 [virtual-609] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
424701:32:26.662 [virtual-609] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
424801:32:26.662 [virtual-609] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832346660
424901:32:26.666 [kafka-producer-network-thread | producer-5] INFO o.a.k.c.Metadata - [Producer clientId=producer-5] Cluster ID: 4oa31apqQtabsfPXH-H0RA
425001:32:26.667 [kafka-producer-network-thread | producer-5] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-5] ProducerId set to 4 with epoch 0
425101:32:26.681 [data-plane-kafka-request-handler-4] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t2) to the active controller.
425201:32:26.682 [kafka-producer-network-thread | producer-5] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-5] The metadata response from the cluster reported a recoverable issue with correlation id 5 : {t2=UNKNOWN_TOPIC_OR_PARTITION}
425301:32:26.683 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
425401:32:26.683 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t2 with topic ID h_XYN1lOTWaXc_aOYOshCA.
425501:32:26.683 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t2-0 with topic ID h_XYN1lOTWaXc_aOYOshCA and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
425601:32:26.710 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
425701:32:26.710 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t2-0)
425801:32:26.710 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t2-0 with topic id h_XYN1lOTWaXc_aOYOshCA.
425901:32:26.713 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t2-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
426001:32:26.714 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t2-0 in /tmp/kafka-logs15769196062054598040/t2-0 with properties {}
426101:32:26.714 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t2-0 broker=0] No checkpointed highwatermark is found for partition t2-0
426201:32:26.714 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t2-0 broker=0] Log loaded for partition t2-0 with initial high watermark 0
426301:32:26.715 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t2-0 with topic id Some(h_XYN1lOTWaXc_aOYOshCA) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
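The UNKNOWN_TOPIC_OR_PARTITION warning above is the usual first-write race with broker-side auto topic creation: the producer's metadata request triggers DefaultAutoTopicCreationManager, and the client simply retries until t2-0 has an elected leader. Pre-creating the topic avoids the transient warning; a sketch using the admin client (replication factor 1 matches the single embedded broker):

  import java.util.Properties
  import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}

  @main def createTopicSketch(): Unit =
    val props = new Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    val admin = Admin.create(props)
    try admin.createTopics(java.util.List.of(new NewTopic("t2", 1, 1.toShort))).all().get()
    finally admin.close()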
426401:32:26.897 [virtual-613] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-5] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
426501:32:26.899 [virtual-613] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
426601:32:26.899 [virtual-613] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
426701:32:26.899 [virtual-613] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
426801:32:26.899 [virtual-613] INFO o.a.k.c.m.Metrics - Metrics reporters closed
426901:32:26.900 [virtual-613] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-5 unregistered
427001:32:26.903 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
4271 allow.auto.create.topics = true
4272 auto.commit.interval.ms = 5000
4273 auto.offset.reset = earliest
4274 bootstrap.servers = [localhost:6001]
4275 check.crcs = true
4276 client.dns.lookup = use_all_dns_ips
4277 client.id = consumer-embedded-kafka-spec-2
4278 client.rack =
4279 connections.max.idle.ms = 540000
4280 default.api.timeout.ms = 60000
4281 enable.auto.commit = false
4282 enable.metrics.push = true
4283 exclude.internal.topics = true
4284 fetch.max.bytes = 52428800
4285 fetch.max.wait.ms = 500
4286 fetch.min.bytes = 1
4287 group.id = embedded-kafka-spec
4288 group.instance.id = null
4289 group.protocol = classic
4290 group.remote.assignor = null
4291 heartbeat.interval.ms = 3000
4292 interceptor.classes = []
4293 internal.leave.group.on.close = true
4294 internal.throw.on.fetch.stable.offset.unsupported = false
4295 isolation.level = read_uncommitted
4296 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4297 max.partition.fetch.bytes = 1048576
4298 max.poll.interval.ms = 300000
4299 max.poll.records = 500
4300 metadata.max.age.ms = 300000
4301 metadata.recovery.rebootstrap.trigger.ms = 300000
4302 metadata.recovery.strategy = rebootstrap
4303 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4304 metrics.num.samples = 2
4305 metrics.recording.level = INFO
4306 metrics.sample.window.ms = 30000
4307 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
4308 receive.buffer.bytes = 65536
4309 reconnect.backoff.max.ms = 1000
4310 reconnect.backoff.ms = 50
4311 request.timeout.ms = 30000
4312 retry.backoff.max.ms = 1000
4313 retry.backoff.ms = 100
4314 sasl.client.callback.handler.class = null
4315 sasl.jaas.config = null
4316 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4317 sasl.kerberos.min.time.before.relogin = 60000
4318 sasl.kerberos.service.name = null
4319 sasl.kerberos.ticket.renew.jitter = 0.05
4320 sasl.kerberos.ticket.renew.window.factor = 0.8
4321 sasl.login.callback.handler.class = null
4322 sasl.login.class = null
4323 sasl.login.connect.timeout.ms = null
4324 sasl.login.read.timeout.ms = null
4325 sasl.login.refresh.buffer.seconds = 300
4326 sasl.login.refresh.min.period.seconds = 60
4327 sasl.login.refresh.window.factor = 0.8
4328 sasl.login.refresh.window.jitter = 0.05
4329 sasl.login.retry.backoff.max.ms = 10000
4330 sasl.login.retry.backoff.ms = 100
4331 sasl.mechanism = GSSAPI
4332 sasl.oauthbearer.assertion.algorithm = RS256
4333 sasl.oauthbearer.assertion.claim.aud = null
4334 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4335 sasl.oauthbearer.assertion.claim.iss = null
4336 sasl.oauthbearer.assertion.claim.jti.include = false
4337 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4338 sasl.oauthbearer.assertion.claim.sub = null
4339 sasl.oauthbearer.assertion.file = null
4340 sasl.oauthbearer.assertion.private.key.file = null
4341 sasl.oauthbearer.assertion.private.key.passphrase = null
4342 sasl.oauthbearer.assertion.template.file = null
4343 sasl.oauthbearer.client.credentials.client.id = null
4344 sasl.oauthbearer.client.credentials.client.secret = null
4345 sasl.oauthbearer.clock.skew.seconds = 30
4346 sasl.oauthbearer.expected.audience = null
4347 sasl.oauthbearer.expected.issuer = null
4348 sasl.oauthbearer.header.urlencode = false
4349 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4350 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4351 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4352 sasl.oauthbearer.jwks.endpoint.url = null
4353 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4354 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4355 sasl.oauthbearer.scope = null
4356 sasl.oauthbearer.scope.claim.name = scope
4357 sasl.oauthbearer.sub.claim.name = sub
4358 sasl.oauthbearer.token.endpoint.url = null
4359 security.protocol = PLAINTEXT
4360 security.providers = null
4361 send.buffer.bytes = 131072
4362 session.timeout.ms = 45000
4363 share.acknowledgement.mode = implicit
4364 socket.connection.setup.timeout.max.ms = 30000
4365 socket.connection.setup.timeout.ms = 10000
4366 ssl.cipher.suites = null
4367 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4368 ssl.endpoint.identification.algorithm = https
4369 ssl.engine.factory.class = null
4370 ssl.key.password = null
4371 ssl.keymanager.algorithm = SunX509
4372 ssl.keystore.certificate.chain = null
4373 ssl.keystore.key = null
4374 ssl.keystore.location = null
4375 ssl.keystore.password = null
4376 ssl.keystore.type = JKS
4377 ssl.protocol = TLSv1.3
4378 ssl.provider = null
4379 ssl.secure.random.implementation = null
4380 ssl.trustmanager.algorithm = PKIX
4381 ssl.truststore.certificates = null
4382 ssl.truststore.location = null
4383 ssl.truststore.password = null
4384 ssl.truststore.type = JKS
4385 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4386
438701:32:26.903 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
438801:32:26.906 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
438901:32:26.906 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
439001:32:26.906 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832346906
439101:32:26.907 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Subscribed to topic(s): t2
439201:32:26.912 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Cluster ID: 4oa31apqQtabsfPXH-H0RA
439301:32:26.916 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
439401:32:26.916 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] (Re-)joining group
439501:32:26.919 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group embedded-kafka-spec in Empty state. Created a new member id consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1 and requesting the member to rejoin with this id.
439601:32:26.919 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Request joining group due to: need to re-join with the given member-id: consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1
439701:32:26.920 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] (Re-)joining group
439801:32:26.921 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1 joins group embedded-kafka-spec in Empty state. Adding to the group now.
439901:32:26.921 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1 with group instance id null; client reason: need to re-join with the given member-id: consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1).
440001:32:29.921 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group embedded-kafka-spec generation 1 with 1 members.
440101:32:29.921 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Successfully joined group with generation Generation{generationId=1, memberId='consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1', protocol='range'}
440201:32:29.922 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Finished assignment for group at generation 1: {consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1=Assignment(partitions=[t2-0])}
440301:32:29.923 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1 for group embedded-kafka-spec for generation 1. The group has 1 members, 0 of which are static.
440401:32:29.929 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Successfully synced group in generation Generation{generationId=1, memberId='consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1', protocol='range'}
440501:32:29.930 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Notifying assignor about the new Assignment(partitions=[t2-0])
440601:32:29.930 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Adding newly assigned partitions: [t2-0]
440701:32:29.931 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Found no committed offset for partition t2-0
440801:32:29.932 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Resetting offset for partition t2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
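The offset reset above is auto.offset.reset=earliest applied to a group with no committed offsets. The explicit programmatic equivalent, sketched against an already-assigned consumer (an assumption, not what the test does):

  // Rewind every assigned partition to the log start; applied on the next poll,
  // matching the logged FetchPosition{offset=0 ...}.
  def rewind(consumer: org.apache.kafka.clients.consumer.KafkaConsumer[String, String]): Unit =
    consumer.seekToBeginning(consumer.assignment())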
440901:32:36.274 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Revoke previously assigned partitions [t2-0]
441001:32:36.274 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Member consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
441101:32:36.274 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Resetting generation and member id due to: consumer pro-actively leaving the group
441201:32:36.275 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Request joining group due to: consumer pro-actively leaving the group
441301:32:36.275 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group embedded-kafka-spec] Member consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
441401:32:36.275 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-embedded-kafka-spec-2-a21d7985-9599-432f-86fe-27d4dd4949d1) members.).
441501:32:36.275 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group embedded-kafka-spec with generation 2 is now empty.
441601:32:36.282 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
441701:32:36.282 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
441801:32:36.282 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
441901:32:36.282 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
442001:32:36.285 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-embedded-kafka-spec-2 unregistered
442101:32:36.288 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4422 acks = -1
4423 batch.size = 16384
4424 bootstrap.servers = [localhost:6001]
4425 buffer.memory = 33554432
4426 client.dns.lookup = use_all_dns_ips
4427 client.id = producer-6
4428 compression.gzip.level = -1
4429 compression.lz4.level = 9
4430 compression.type = none
4431 compression.zstd.level = 3
4432 connections.max.idle.ms = 540000
4433 delivery.timeout.ms = 120000
4434 enable.idempotence = true
4435 enable.metrics.push = true
4436 interceptor.classes = []
4437 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4438 linger.ms = 5
4439 max.block.ms = 10000
4440 max.in.flight.requests.per.connection = 5
4441 max.request.size = 1048576
4442 metadata.max.age.ms = 300000
4443 metadata.max.idle.ms = 300000
4444 metadata.recovery.rebootstrap.trigger.ms = 300000
4445 metadata.recovery.strategy = rebootstrap
4446 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4447 metrics.num.samples = 2
4448 metrics.recording.level = INFO
4449 metrics.sample.window.ms = 30000
4450 partitioner.adaptive.partitioning.enable = true
4451 partitioner.availability.timeout.ms = 0
4452 partitioner.class = null
4453 partitioner.ignore.keys = false
4454 receive.buffer.bytes = 32768
4455 reconnect.backoff.max.ms = 1000
4456 reconnect.backoff.ms = 50
4457 request.timeout.ms = 30000
4458 retries = 2147483647
4459 retry.backoff.max.ms = 1000
4460 retry.backoff.ms = 1000
4461 sasl.client.callback.handler.class = null
4462 sasl.jaas.config = null
4463 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4464 sasl.kerberos.min.time.before.relogin = 60000
4465 sasl.kerberos.service.name = null
4466 sasl.kerberos.ticket.renew.jitter = 0.05
4467 sasl.kerberos.ticket.renew.window.factor = 0.8
4468 sasl.login.callback.handler.class = null
4469 sasl.login.class = null
4470 sasl.login.connect.timeout.ms = null
4471 sasl.login.read.timeout.ms = null
4472 sasl.login.refresh.buffer.seconds = 300
4473 sasl.login.refresh.min.period.seconds = 60
4474 sasl.login.refresh.window.factor = 0.8
4475 sasl.login.refresh.window.jitter = 0.05
4476 sasl.login.retry.backoff.max.ms = 10000
4477 sasl.login.retry.backoff.ms = 100
4478 sasl.mechanism = GSSAPI
4479 sasl.oauthbearer.assertion.algorithm = RS256
4480 sasl.oauthbearer.assertion.claim.aud = null
4481 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4482 sasl.oauthbearer.assertion.claim.iss = null
4483 sasl.oauthbearer.assertion.claim.jti.include = false
4484 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4485 sasl.oauthbearer.assertion.claim.sub = null
4486 sasl.oauthbearer.assertion.file = null
4487 sasl.oauthbearer.assertion.private.key.file = null
4488 sasl.oauthbearer.assertion.private.key.passphrase = null
4489 sasl.oauthbearer.assertion.template.file = null
4490 sasl.oauthbearer.client.credentials.client.id = null
4491 sasl.oauthbearer.client.credentials.client.secret = null
4492 sasl.oauthbearer.clock.skew.seconds = 30
4493 sasl.oauthbearer.expected.audience = null
4494 sasl.oauthbearer.expected.issuer = null
4495 sasl.oauthbearer.header.urlencode = false
4496 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4497 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4498 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4499 sasl.oauthbearer.jwks.endpoint.url = null
4500 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4501 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4502 sasl.oauthbearer.scope = null
4503 sasl.oauthbearer.scope.claim.name = scope
4504 sasl.oauthbearer.sub.claim.name = sub
4505 sasl.oauthbearer.token.endpoint.url = null
4506 security.protocol = PLAINTEXT
4507 security.providers = null
4508 send.buffer.bytes = 131072
4509 socket.connection.setup.timeout.max.ms = 30000
4510 socket.connection.setup.timeout.ms = 10000
4511 ssl.cipher.suites = null
4512 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4513 ssl.endpoint.identification.algorithm = https
4514 ssl.engine.factory.class = null
4515 ssl.key.password = null
4516 ssl.keymanager.algorithm = SunX509
4517 ssl.keystore.certificate.chain = null
4518 ssl.keystore.key = null
4519 ssl.keystore.location = null
4520 ssl.keystore.password = null
4521 ssl.keystore.type = JKS
4522 ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:32:36.288 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:36.288 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-6] Instantiated an idempotent producer.
01:32:36.291 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:36.291 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:36.291 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832356291
01:32:36.294 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t3_1) to the active controller.
01:32:36.295 [kafka-producer-network-thread | producer-6] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-6] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t3_1=UNKNOWN_TOPIC_OR_PARTITION}
01:32:36.296 [kafka-producer-network-thread | producer-6] INFO o.a.k.c.Metadata - [Producer clientId=producer-6] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:36.296 [kafka-producer-network-thread | producer-6] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-6] ProducerId set to 5 with epoch 0
01:32:36.298 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t3_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
01:32:36.298 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t3_1 with topic ID tTBbGOXlSzSaXJqBUpam3g.
01:32:36.300 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t3_1-0 with topic ID tTBbGOXlSzSaXJqBUpam3g and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
01:32:36.325 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
01:32:36.325 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t3_1-0)
01:32:36.325 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t3_1-0 with topic id tTBbGOXlSzSaXJqBUpam3g.
01:32:36.329 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t3_1-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
01:32:36.329 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t3_1-0 in /tmp/kafka-logs15769196062054598040/t3_1-0 with properties {}
01:32:36.329 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_1-0 broker=0] No checkpointed highwatermark is found for partition t3_1-0
01:32:36.329 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_1-0 broker=0] Log loaded for partition t3_1-0 with initial high watermark 0
01:32:36.330 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t3_1-0 with topic id Some(tTBbGOXlSzSaXJqBUpam3g) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
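Annotation: the broker auto-created t3_1 here (auto.create.topics is on in the embedded test broker), which is why the first metadata request logged the recoverable UNKNOWN_TOPIC_OR_PARTITION warning above. A hedged sketch of creating the same topic explicitly with the standard AdminClient, which avoids that warning; the broker address and topic name are taken from this run, nothing else is from the project's code:

import java.util.Properties
import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig, NewTopic}

@main def createTopicSketch(): Unit =
  val props = new Properties()
  props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  val admin = AdminClient.create(props)
  // one partition, replication factor 1, matching the CreatableTopic logged above
  try admin.createTopics(java.util.List.of(new NewTopic("t3_1", 1, 1.toShort))).all().get()
  finally admin.close()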
01:32:37.307 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-6] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:32:37.309 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:37.309 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:37.309 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:37.309 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:37.309 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-6 unregistered
01:32:37.310 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-7
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

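Annotation: the ProducerConfig dump above shows Kafka 4.x producer defaults at work — with enable.idempotence = true the client pins acks = -1 (all) and retries = 2147483647, matching the "Instantiated an idempotent producer" lines in this log. A minimal sketch of building an equivalent producer with the plain Java client (broker address and topic taken from this test run; this is not the ox-kafka API itself):

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

@main def producerSketch(): Unit =
  val props = new Properties()
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  // Idempotence is the default in Kafka 4.x; it forces acks=all and
  // retries=Int.MaxValue, exactly as printed in the dump above.
  props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
  val producer = new KafkaProducer(props, new StringSerializer, new StringSerializer)
  try producer.send(new ProducerRecord("t3_1", "key", "value")).get()
  finally producer.close()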
01:32:37.310 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:37.310 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-7] Instantiated an idempotent producer.
01:32:37.313 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:37.313 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:37.313 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832357313
01:32:37.316 [kafka-producer-network-thread | producer-7] INFO o.a.k.c.Metadata - [Producer clientId=producer-7] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:37.316 [kafka-producer-network-thread | producer-7] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-7] ProducerId set to 6 with epoch 0
01:32:37.324 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-7] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:32:37.326 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:37.326 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:37.326 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:37.326 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:37.326 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-7 unregistered
01:32:37.327 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-8
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:32:37.327 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:37.327 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-8] Instantiated an idempotent producer.
01:32:37.330 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:37.330 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:37.330 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832357330
01:32:37.332 [kafka-producer-network-thread | producer-8] INFO o.a.k.c.Metadata - [Producer clientId=producer-8] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:37.333 [kafka-producer-network-thread | producer-8] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-8] ProducerId set to 7 with epoch 0
01:32:37.341 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-8] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:32:37.343 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:37.343 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:37.343 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:37.343 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:37.344 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-8 unregistered
01:32:37.346 [virtual-618] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g3_1-3
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g3_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

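Annotation: the ConsumerConfig dump above (and the near-identical one for consumer-g3_1-4 that follows) shows the settings the test runs with — manual commits (enable.auto.commit = false), auto.offset.reset = earliest, group g3_1, classic group protocol. A hedged sketch of an equivalent consumer built directly on the plain Java client, with values copied from the dump (this is illustrative only, not the ox-kafka wrapper):

import java.time.Duration
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer

@main def consumerSketch(): Unit =
  val props = new Properties()
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  props.put(ConsumerConfig.GROUP_ID_CONFIG, "g3_1")
  props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // reads from offset 0, as logged below
  props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")   // offsets are committed explicitly
  val consumer = new KafkaConsumer(props, new StringDeserializer, new StringDeserializer)
  try
    consumer.subscribe(java.util.List.of("t3_2"))
    val records = consumer.poll(Duration.ofSeconds(1))
    records.forEach(r => println(s"${r.topic}-${r.partition}@${r.offset}: ${r.value}"))
  finally consumer.close()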
01:32:37.346 [virtual-620] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g3_1-4
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g3_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:32:37.346 [virtual-618] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:37.348 [virtual-620] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:37.349 [virtual-618] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:37.349 [virtual-618] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:37.349 [virtual-618] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832357349
01:32:37.350 [virtual-623] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Subscribed to topic(s): t3_2
01:32:37.353 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:37.353 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:37.353 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832357353
01:32:37.355 [virtual-620] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-9
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 60000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:32:37.355 [virtual-620] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:37.356 [virtual-620] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-9] Instantiated an idempotent producer.
01:32:37.356 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t3_2) to the active controller.
01:32:37.357 [virtual-623] WARN o.a.k.c.NetworkClient - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] The metadata response from the cluster reported a recoverable issue with correlation id 2 : {t3_2=UNKNOWN_TOPIC_OR_PARTITION}
01:32:37.357 [virtual-623] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:37.358 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:32:37.361 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] (Re-)joining group
01:32:37.362 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t3_2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
01:32:37.363 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t3_2 with topic ID cg2GFdd8SqiaI2w8H93hKw.
01:32:37.363 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t3_2-0 with topic ID cg2GFdd8SqiaI2w8H93hKw and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
01:32:37.364 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:37.364 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:37.364 [virtual-620] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832357363
01:32:37.367 [virtual-624] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Subscribed to topic(s): t3_1
01:32:37.369 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in Empty state. Created a new member id consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b and requesting the member to rejoin with this id.
01:32:37.371 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b
01:32:37.371 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] (Re-)joining group
01:32:37.372 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b joins group g3_1 in Empty state. Adding to the group now.
01:32:37.372 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b).
01:32:37.373 [kafka-producer-network-thread | producer-9] INFO o.a.k.c.Metadata - [Producer clientId=producer-9] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:37.373 [kafka-producer-network-thread | producer-9] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-9] ProducerId set to 8 with epoch 0
01:32:37.373 [virtual-624] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:37.375 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:32:37.377 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] (Re-)joining group
01:32:37.379 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in PreparingRebalance state. Created a new member id consumer-g3_1-4-47c0d0d6-cb90-48d9-b0ed-464bc737b29c and requesting the member to rejoin with this id.
01:32:37.379 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-4-47c0d0d6-cb90-48d9-b0ed-464bc737b29c
01:32:37.379 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] (Re-)joining group
01:32:37.383 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-4-47c0d0d6-cb90-48d9-b0ed-464bc737b29c joins group g3_1 in PreparingRebalance state. Adding to the group now.
01:32:37.389 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
01:32:37.390 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t3_2-0)
01:32:37.390 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t3_2-0 with topic id cg2GFdd8SqiaI2w8H93hKw.
01:32:37.392 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t3_2-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
01:32:37.392 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t3_2-0 in /tmp/kafka-logs15769196062054598040/t3_2-0 with properties {}
01:32:37.393 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_2-0 broker=0] No checkpointed highwatermark is found for partition t3_2-0
01:32:37.393 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_2-0 broker=0] Log loaded for partition t3_2-0 with initial high watermark 0
01:32:37.393 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t3_2-0 with topic id Some(cg2GFdd8SqiaI2w8H93hKw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
01:32:43.373 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_1 generation 1 with 2 members.
01:32:43.374 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b', protocol='range'}
01:32:43.374 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_1-4-47c0d0d6-cb90-48d9-b0ed-464bc737b29c', protocol='range'}
01:32:43.377 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Finished assignment for group at generation 1: {consumer-g3_1-4-47c0d0d6-cb90-48d9-b0ed-464bc737b29c=Assignment(partitions=[t3_1-0]), consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b=Assignment(partitions=[t3_2-0])}
01:32:43.377 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b for group g3_1 for generation 1. The group has 2 members, 0 of which are static.
01:32:43.383 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b', protocol='range'}
01:32:43.383 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_1-4-47c0d0d6-cb90-48d9-b0ed-464bc737b29c', protocol='range'}
01:32:43.384 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_2-0])
01:32:43.384 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_1-0])
01:32:43.384 [virtual-623] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Adding newly assigned partitions: [t3_2-0]
01:32:43.384 [virtual-624] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Adding newly assigned partitions: [t3_1-0]
01:32:43.385 [virtual-623] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Found no committed offset for partition t3_2-0
01:32:43.385 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Found no committed offset for partition t3_1-0
01:32:43.387 [virtual-623] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Resetting offset for partition t3_2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
01:32:43.388 [virtual-624] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Resetting offset for partition t3_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
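Annotation: the sequence above is the classic-protocol join/sync handshake — member ids are issued, generation 1 stabilizes with 2 members, the range assignor gives t3_1-0 and t3_2-0 to the two members, and offsets are reset to earliest since nothing was committed. A hedged sketch of observing these transitions with the standard ConsumerRebalanceListener (consumer construction omitted; partition and generation values are the ones logged here):

import java.util.Collection
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
import org.apache.kafka.common.TopicPartition

// Passed to consumer.subscribe(topics, listener); the callbacks correspond to the
// "Adding newly assigned partitions" / "Revoke previously assigned partitions"
// log lines above and below.
val listener = new ConsumerRebalanceListener {
  override def onPartitionsAssigned(partitions: Collection[TopicPartition]): Unit =
    println(s"assigned: $partitions") // e.g. [t3_2-0] for consumer-g3_1-3 at generation 1
  override def onPartitionsRevoked(partitions: Collection[TopicPartition]): Unit =
    println(s"revoked: $partitions")  // fires when the consumers close at the end of the test
}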
01:32:45.403 [virtual-622] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:32:45.403 [virtual-623] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:32:45.404 [virtual-628] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
    at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
    at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
    at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
    at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:32:45.403 [virtual-624] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
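Annotation: the two ERROR pairs above are expected shutdown noise rather than test failures: KafkaConsumer.poll throws InterruptException when the virtual thread running it is interrupted, which is what ox's structured-concurrency scope does to its remaining forks once the test body completes (the consumers then leave the group cleanly, as the LeaveGroup lines below show). A minimal sketch of the mechanism, assuming only the public ox primitives visible in the traces (supervised, fork); this is a hypothetical demo, not the KafkaFlow internals:

import ox.{fork, supervised}

@main def pollInterruptDemo(): Unit =
  supervised {
    fork { // a daemon fork, interrupted when the scope's main body finishes
      try while true do Thread.sleep(100) // stands in for the blocking KafkaConsumer.poll(...)
      catch
        case _: InterruptedException =>
          // The Kafka client rethrows this as o.a.k.common.errors.InterruptException,
          // which ox-kafka then logs as "Exception when polling for records".
          println("polling fork interrupted during scope shutdown")
    }
    Thread.sleep(500) // the test body; once it returns, the scope interrupts the fork
  }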
530601:32:45.404 [virtual-634] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Revoke previously assigned partitions [t3_2-0]
530701:32:45.405 [virtual-634] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Member consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
530801:32:45.405 [virtual-634] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
530901:32:45.405 [virtual-634] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
531001:32:45.405 [virtual-635] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Revoke previously assigned partitions [t3_1-0]
531101:32:45.406 [virtual-636] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-9] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
531201:32:45.406 [virtual-635] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Member consumer-g3_1-4-47c0d0d6-cb90-48d9-b0ed-464bc737b29c sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
531301:32:45.406 [virtual-635] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
531401:32:45.406 [virtual-635] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
531501:32:45.406 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
531601:32:45.407 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g3_1-3-4b4b216f-1ec5-4dea-93c6-ab466d6ab14b) members.).
531701:32:45.407 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-4-47c0d0d6-cb90-48d9-b0ed-464bc737b29c has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
531801:32:45.407 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_1 with generation 2 is now empty.
531901:32:45.409 [virtual-634] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
532001:32:45.409 [virtual-634] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
532101:32:45.409 [virtual-634] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
532201:32:45.409 [virtual-634] INFO o.a.k.c.m.Metrics - Metrics reporters closed
532301:32:45.412 [virtual-636] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
532401:32:45.412 [virtual-636] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
532501:32:45.412 [virtual-636] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
532601:32:45.412 [virtual-636] INFO o.a.k.c.m.Metrics - Metrics reporters closed
532701:32:45.412 [virtual-634] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-3 unregistered
532801:32:45.413 [virtual-636] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-9 unregistered
532901:32:45.898 [virtual-635] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
533001:32:45.898 [virtual-635] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
533101:32:45.899 [virtual-635] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
533201:32:45.899 [virtual-635] INFO o.a.k.c.m.Metrics - Metrics reporters closed
533301:32:45.900 [virtual-635] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-4 unregistered
533401:32:45.901 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
5335 acks = -1
5336 batch.size = 16384
5337 bootstrap.servers = [localhost:6001]
5338 buffer.memory = 33554432
5339 client.dns.lookup = use_all_dns_ips
    client.id = producer-10
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:32:45.901 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:45.902 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-10] Instantiated an idempotent producer.
01:32:45.904 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:45.904 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:45.904 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832365904
01:32:45.906 [kafka-producer-network-thread | producer-10] INFO o.a.k.c.Metadata - [Producer clientId=producer-10] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:45.907 [kafka-producer-network-thread | producer-10] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-10] ProducerId set to 9 with epoch 0
01:32:45.915 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-10] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:32:45.917 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:45.917 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:45.917 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:45.917 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:45.917 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-10 unregistered
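----
[editor's note] The ProducerConfig dump above shows the idempotent-producer defaults at work: enable.idempotence = true, retries = 2147483647, and at most 5 in-flight requests per connection; with idempotence enabled the client effectively requires acks=all. A minimal sketch of a producer matching that dump (plain Kafka Java client from Scala, not the ox-kafka API; localhost:6001 and the topic name are copied from this log, everything else is illustrative):

  import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
  import org.apache.kafka.common.serialization.StringSerializer
  import java.util.Properties

  val props = new Properties()
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") // implies acks=all and retries=Int.MaxValue
  props.put(ProducerConfig.LINGER_MS_CONFIG, "5")
  val producer = new KafkaProducer[String, String](props, new StringSerializer, new StringSerializer)
  try producer.send(new ProducerRecord("t3_1", "key", "value")).get() // blocks until the send is acked
  finally producer.close()
----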
01:32:45.918 [virtual-638] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g3_1-5
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g3_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:32:45.919 [virtual-638] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:45.922 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:45.922 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:45.922 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832365922
01:32:45.922 [virtual-641] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Subscribed to topic(s): t3_1
01:32:45.925 [virtual-641] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:45.925 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:32:45.926 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] (Re-)joining group
01:32:45.928 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in Empty state. Created a new member id consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046 and requesting the member to rejoin with this id.
01:32:45.928 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046
01:32:45.928 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] (Re-)joining group
01:32:45.929 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046 joins group g3_1 in Empty state. Adding to the group now.
01:32:45.929 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046 with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046).
01:32:48.929 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_1 generation 3 with 1 members.
01:32:48.930 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046', protocol='range'}
01:32:48.930 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Finished assignment for group at generation 3: {consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046=Assignment(partitions=[t3_1-0])}
01:32:48.931 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046 for group g3_1 for generation 3. The group has 1 members, 0 of which are static.
01:32:48.937 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046', protocol='range'}
01:32:48.937 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_1-0])
01:32:48.937 [virtual-641] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Adding newly assigned partitions: [t3_1-0]
01:32:48.939 [virtual-641] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t3_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
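----
[editor's note] The lines above are one complete classic-protocol group join: the first JoinGroup is rejected with a freshly generated member id ("need to re-join with the given member-id"), the rejoin stabilizes the group, the sole member acts as leader and computes the range assignment, SyncGroup distributes it, and the consumer finally seeks to the committed offset (here offset 3 for t3_1-0). A sketch of the client-side calls that drive this handshake (plain Kafka client; group and topic names copied from the log, the rest is illustrative):

  import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
  import org.apache.kafka.common.serialization.StringDeserializer
  import java.time.Duration
  import java.util.Properties
  import scala.jdk.CollectionConverters.*

  val props = new Properties()
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  props.put(ConsumerConfig.GROUP_ID_CONFIG, "g3_1")
  props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // only used when no committed offset exists
  props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
  val consumer = new KafkaConsumer[String, String](props, new StringDeserializer, new StringDeserializer)
  consumer.subscribe(List("t3_1").asJava)            // lazy: no network I/O yet
  val records = consumer.poll(Duration.ofSeconds(1)) // the first poll performs the JoinGroup/SyncGroup round trips
----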
01:32:48.944 [virtual-638] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g3_2-6
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g3_2
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:32:48.944 [virtual-638] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:48.947 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:48.947 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:48.947 [virtual-638] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832368947
01:32:48.947 [virtual-645] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Subscribed to topic(s): t3_1
01:32:48.950 [virtual-645] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:48.951 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:32:48.952 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] (Re-)joining group
01:32:48.953 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_2 in Empty state. Created a new member id consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841 and requesting the member to rejoin with this id.
01:32:48.954 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Request joining group due to: need to re-join with the given member-id: consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841
01:32:48.954 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] (Re-)joining group
01:32:48.954 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841 joins group g3_2 in Empty state. Adding to the group now.
01:32:48.954 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841 with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841).
01:32:51.955 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_2 generation 1 with 1 members.
01:32:51.955 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841', protocol='range'}
01:32:51.956 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Finished assignment for group at generation 1: {consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841=Assignment(partitions=[t3_1-0])}
01:32:51.956 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841 for group g3_2 for generation 1. The group has 1 members, 0 of which are static.
01:32:51.962 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841', protocol='range'}
01:32:51.963 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Notifying assignor about the new Assignment(partitions=[t3_1-0])
01:32:51.963 [virtual-645] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Adding newly assigned partitions: [t3_1-0]
01:32:51.964 [virtual-645] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Found no committed offset for partition t3_1-0
01:32:51.967 [virtual-645] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Resetting offset for partition t3_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
01:32:51.970 [virtual-640] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:32:51.970 [virtual-644] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:32:51.970 [virtual-645] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:32:51.970 [virtual-641] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:32:51.971 [virtual-648] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Revoke previously assigned partitions [t3_1-0]
01:32:51.971 [virtual-648] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Member consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:32:51.971 [virtual-648] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Resetting generation and member id due to: consumer pro-actively leaving the group
01:32:51.971 [virtual-647] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Revoke previously assigned partitions [t3_1-0]
01:32:51.971 [virtual-647] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Member consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:32:51.971 [virtual-648] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Request joining group due to: consumer pro-actively leaving the group
01:32:51.972 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_2] Member consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:32:51.972 [virtual-647] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
01:32:51.972 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g3_2-6-89ee0dd2-f42b-4cd5-83a3-7f2cb82c5841) members.).
01:32:51.972 [virtual-647] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
01:32:51.972 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_2 with generation 2 is now empty.
01:32:51.973 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:32:51.973 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g3_1-5-60f732bf-edde-4386-9c11-66f73032b046) members.).
01:32:51.973 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_1 with generation 4 is now empty.
01:32:52.454 [virtual-647] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:52.454 [virtual-647] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:52.454 [virtual-647] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:52.454 [virtual-647] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:52.456 [virtual-647] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-5 unregistered
01:32:52.472 [virtual-648] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:52.472 [virtual-648] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:52.472 [virtual-648] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:52.472 [virtual-648] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:52.474 [virtual-648] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_2-6 unregistered
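----
[editor's note] The revoke/LeaveGroup/unregister sequence above is the normal effect of KafkaConsumer.close() with the default internal.leave.group.on.close = true. Continuing the earlier consumer sketch (illustrative; wakeup() is only needed when the close is initiated from another thread while a poll is blocking):

  consumer.wakeup() // makes a concurrent poll() throw WakeupException instead of blocking
  consumer.close()  // revokes assigned partitions and sends the LeaveGroup request logged above
----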
01:32:52.477 [virtual-649] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-11
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 60000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:32:52.477 [virtual-649] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:52.478 [virtual-649] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-11] Instantiated an idempotent producer.
01:32:52.480 [virtual-649] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:52.480 [virtual-649] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:52.480 [virtual-649] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832372480
01:32:52.483 [data-plane-kafka-request-handler-0] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t4) to the active controller.
01:32:52.483 [kafka-producer-network-thread | producer-11] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-11] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t4=UNKNOWN_TOPIC_OR_PARTITION}
01:32:52.483 [kafka-producer-network-thread | producer-11] INFO o.a.k.c.Metadata - [Producer clientId=producer-11] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:52.484 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t4', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
01:32:52.484 [kafka-producer-network-thread | producer-11] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-11] ProducerId set to 10 with epoch 0
01:32:52.484 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t4 with topic ID jBCZEPDvRvGwrVI7MdxkmA.
01:32:52.484 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t4-0 with topic ID jBCZEPDvRvGwrVI7MdxkmA and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
01:32:52.510 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
01:32:52.510 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t4-0)
01:32:52.510 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t4-0 with topic id jBCZEPDvRvGwrVI7MdxkmA.
01:32:52.512 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t4-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
01:32:52.513 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t4-0 in /tmp/kafka-logs15769196062054598040/t4-0 with properties {}
01:32:52.513 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t4-0 broker=0] No checkpointed highwatermark is found for partition t4-0
01:32:52.513 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t4-0 broker=0] Log loaded for partition t4-0 with initial high watermark 0
01:32:52.513 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t4-0 with topic id Some(jBCZEPDvRvGwrVI7MdxkmA) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
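----
[editor's note] Topic t4 did not exist when producer-11 first fetched metadata, so the broker auto-created it (DefaultAutoTopicCreationManager) and the client logged a transient, recoverable UNKNOWN_TOPIC_OR_PARTITION warning until metadata refreshed. A test that wants to avoid that warning can pre-create the topic; a sketch with the admin client (broker address and topic name copied from the log, otherwise illustrative):

  import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}
  import java.util.Properties
  import scala.jdk.CollectionConverters.*

  val adminProps = new Properties()
  adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  val admin = Admin.create(adminProps)
  try admin.createTopics(List(new NewTopic("t4", 1, 1.toShort)).asJava).all().get() // blocks until created
  finally admin.close()
----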
01:32:52.600 [virtual-653] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-11] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:32:52.604 [virtual-653] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:52.604 [virtual-653] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:52.604 [virtual-653] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:52.604 [virtual-653] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:52.605 [virtual-653] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-11 unregistered
01:32:52.605 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-embedded-kafka-spec-7
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = embedded-kafka-spec
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:32:52.606 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:52.608 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:52.608 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:52.608 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832372608
01:32:52.608 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Subscribed to topic(s): t4
01:32:52.610 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:52.613 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:32:52.613 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] (Re-)joining group
01:32:52.615 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group embedded-kafka-spec in Empty state. Created a new member id consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80 and requesting the member to rejoin with this id.
01:32:52.615 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Request joining group due to: need to re-join with the given member-id: consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80
01:32:52.615 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] (Re-)joining group
01:32:52.616 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80 joins group embedded-kafka-spec in Empty state. Adding to the group now.
01:32:52.616 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80 with group instance id null; client reason: need to re-join with the given member-id: consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80).
01:32:55.617 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group embedded-kafka-spec generation 3 with 1 members.
01:32:55.618 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Successfully joined group with generation Generation{generationId=3, memberId='consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80', protocol='range'}
01:32:55.618 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Finished assignment for group at generation 3: {consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80=Assignment(partitions=[t4-0])}
01:32:55.618 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80 for group embedded-kafka-spec for generation 3. The group has 1 members, 0 of which are static.
01:32:55.625 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Successfully synced group in generation Generation{generationId=3, memberId='consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80', protocol='range'}
01:32:55.625 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Notifying assignor about the new Assignment(partitions=[t4-0])
01:32:55.625 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Adding newly assigned partitions: [t4-0]
01:32:55.626 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Found no committed offset for partition t4-0
01:32:55.627 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Resetting offset for partition t4-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
01:32:55.646 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Revoke previously assigned partitions [t4-0]
01:32:55.647 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Member consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:32:55.647 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Resetting generation and member id due to: consumer pro-actively leaving the group
01:32:55.647 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Request joining group due to: consumer pro-actively leaving the group
01:32:55.647 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group embedded-kafka-spec] Member consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:32:55.647 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-embedded-kafka-spec-7-f0d20724-65bc-480c-ab2c-6cef663abc80) members.).
01:32:55.647 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group embedded-kafka-spec with generation 4 is now empty.
01:32:56.130 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:56.130 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:56.130 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:56.130 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:56.133 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-embedded-kafka-spec-7 unregistered
01:32:56.135 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-12
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
6241 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
6242
624301:32:56.135 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
624401:32:56.135 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-12] Instantiated an idempotent producer.
624501:32:56.138 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
624601:32:56.138 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
624701:32:56.138 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832376138
624801:32:56.140 [data-plane-kafka-request-handler-2] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t5_1) to the active controller.
624901:32:56.141 [kafka-producer-network-thread | producer-12] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-12] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t5_1=UNKNOWN_TOPIC_OR_PARTITION}
625001:32:56.142 [kafka-producer-network-thread | producer-12] INFO o.a.k.c.Metadata - [Producer clientId=producer-12] Cluster ID: 4oa31apqQtabsfPXH-H0RA
625101:32:56.142 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t5_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
625201:32:56.142 [kafka-producer-network-thread | producer-12] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-12] ProducerId set to 11 with epoch 0
625301:32:56.142 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t5_1 with topic ID QwdaHlQNQWORhS4grr7B5w.
625401:32:56.142 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t5_1-0 with topic ID QwdaHlQNQWORhS4grr7B5w and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
625501:32:56.168 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
625601:32:56.168 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t5_1-0)
625701:32:56.168 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t5_1-0 with topic id QwdaHlQNQWORhS4grr7B5w.
625801:32:56.171 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t5_1-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
625901:32:56.171 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t5_1-0 in /tmp/kafka-logs15769196062054598040/t5_1-0 with properties {}
626001:32:56.171 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_1-0 broker=0] No checkpointed highwatermark is found for partition t5_1-0
626101:32:56.171 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_1-0 broker=0] Log loaded for partition t5_1-0 with initial high watermark 0
626201:32:56.172 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t5_1-0 with topic id Some(QwdaHlQNQWORhS4grr7B5w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
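The UNKNOWN_TOPIC_OR_PARTITION warning above is the expected first metadata response for a topic that does not exist yet: the broker forwards an auto-creation request for t5_1 to the active controller, the controller replays the TopicRecord and PartitionRecord, and the producer's metadata retry then succeeds. Tests that want to avoid the transient warning can create topics up front; a minimal sketch using the Kafka admin client, where the broker address and the 1-partition / replication-factor-1 layout are taken from this log and everything else is illustrative:
----
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}
import java.util.Properties

// Hypothetical pre-creation of the test topic; "t5_1" with 1 partition and
// replication factor 1 matches the CreateTopics result logged above.
val props = new Properties()
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
val admin = Admin.create(props)
try admin.createTopics(java.util.List.of(new NewTopic("t5_1", 1, 1.toShort))).all().get()
finally admin.close()
----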
01:32:57.153 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-12] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:32:57.154 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:57.154 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:57.154 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:57.154 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:57.155 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-12 unregistered
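Each of the ProducerConfig dumps in this part of the log corresponds to a short-lived String/String producer that the test constructs, uses, and closes (producer-12 above; producer-13 and producer-14 below). A minimal sketch of an equivalent producer, assuming only the non-default settings visible in the dump; the topic and payload are illustrative and not taken from the test source:
----
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer
import java.util.Properties

val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000") // max.block.ms from the dump
props.put(ProducerConfig.LINGER_MS_CONFIG, "5")        // linger.ms from the dump

val producer = new KafkaProducer[String, String](props, new StringSerializer, new StringSerializer)
try producer.send(new ProducerRecord("t5_1", "key", "value")).get() // block until acked
finally producer.close() // emits the "Closing the Kafka producer" lines seen above
----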
01:32:57.155 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-13
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:32:57.155 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:57.156 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-13] Instantiated an idempotent producer.
01:32:57.158 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:57.159 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:57.159 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832377158
01:32:57.161 [kafka-producer-network-thread | producer-13] INFO o.a.k.c.Metadata - [Producer clientId=producer-13] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:57.161 [kafka-producer-network-thread | producer-13] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-13] ProducerId set to 12 with epoch 0
01:32:57.169 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-13] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:32:57.170 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:57.170 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:57.171 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:57.171 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:57.171 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-13 unregistered
01:32:57.171 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-14
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:32:57.171 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:57.172 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-14] Instantiated an idempotent producer.
01:32:57.173 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:57.173 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:57.173 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832377173
01:32:57.176 [kafka-producer-network-thread | producer-14] INFO o.a.k.c.Metadata - [Producer clientId=producer-14] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:57.176 [kafka-producer-network-thread | producer-14] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-14] ProducerId set to 13 with epoch 0
01:32:57.184 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-14] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:32:57.185 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:32:57.185 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:32:57.186 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:32:57.186 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:32:57.186 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-14 unregistered
01:32:57.187 [virtual-658] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_1-8
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:32:57.187 [virtual-658] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:57.187 [virtual-660] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_1-9
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:32:57.189 [virtual-660] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:57.191 [virtual-658] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:57.191 [virtual-658] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:57.191 [virtual-658] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832377191
01:32:57.191 [virtual-663] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Subscribed to topic(s): t5_2
01:32:57.193 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:57.193 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:57.193 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832377193
01:32:57.194 [data-plane-kafka-request-handler-6] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t5_2) to the active controller.
01:32:57.194 [virtual-663] WARN o.a.k.c.NetworkClient - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] The metadata response from the cluster reported a recoverable issue with correlation id 2 : {t5_2=UNKNOWN_TOPIC_OR_PARTITION}
01:32:57.195 [virtual-663] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:57.194 [virtual-660] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-15
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 60000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:32:57.195 [virtual-660] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:32:57.195 [virtual-663] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:32:57.197 [virtual-660] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-15] Instantiated an idempotent producer.
01:32:57.197 [virtual-663] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] (Re-)joining group
01:32:57.199 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:32:57.200 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:32:57.200 [virtual-660] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832377199
01:32:57.200 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t5_2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
01:32:57.200 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t5_2 with topic ID n7l0L0taTVSuVZzhCdTdRg.
01:32:57.200 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t5_2-0 with topic ID n7l0L0taTVSuVZzhCdTdRg and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
01:32:57.201 [virtual-664] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Subscribed to topic(s): t5_1
01:32:57.202 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in Empty state. Created a new member id consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab and requesting the member to rejoin with this id.
01:32:57.202 [virtual-663] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab
01:32:57.203 [virtual-663] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] (Re-)joining group
01:32:57.204 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab joins group g5_1 in Empty state. Adding to the group now.
01:32:57.204 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab).
01:32:57.205 [kafka-producer-network-thread | producer-15] INFO o.a.k.c.Metadata - [Producer clientId=producer-15] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:57.205 [virtual-664] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:32:57.206 [kafka-producer-network-thread | producer-15] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-15] ProducerId set to 14 with epoch 0
01:32:57.206 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:32:57.206 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] (Re-)joining group
01:32:57.208 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in PreparingRebalance state. Created a new member id consumer-g5_1-9-dd9be8bf-36cc-4f02-a6b9-a47f74e2e70f and requesting the member to rejoin with this id.
01:32:57.208 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-9-dd9be8bf-36cc-4f02-a6b9-a47f74e2e70f
01:32:57.208 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] (Re-)joining group
01:32:57.209 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-9-dd9be8bf-36cc-4f02-a6b9-a47f74e2e70f joins group g5_1 in PreparingRebalance state. Adding to the group now.
01:32:57.227 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
01:32:57.227 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t5_2-0)
01:32:57.227 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t5_2-0 with topic id n7l0L0taTVSuVZzhCdTdRg.
01:32:57.229 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t5_2-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
01:32:57.230 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t5_2-0 in /tmp/kafka-logs15769196062054598040/t5_2-0 with properties {}
01:32:57.230 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_2-0 broker=0] No checkpointed highwatermark is found for partition t5_2-0
01:32:57.230 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_2-0 broker=0] Log loaded for partition t5_2-0 with initial high watermark 0
01:32:57.231 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t5_2-0 with topic id Some(n7l0L0taTVSuVZzhCdTdRg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
01:33:03.206 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_1 generation 1 with 2 members.
01:33:03.207 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_1-9-dd9be8bf-36cc-4f02-a6b9-a47f74e2e70f', protocol='range'}
01:33:03.207 [virtual-663] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab', protocol='range'}
01:33:03.209 [virtual-663] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Finished assignment for group at generation 1: {consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab=Assignment(partitions=[t5_2-0]), consumer-g5_1-9-dd9be8bf-36cc-4f02-a6b9-a47f74e2e70f=Assignment(partitions=[t5_1-0])}
01:33:03.209 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab for group g5_1 for generation 1. The group has 2 members, 0 of which are static.
01:33:03.215 [virtual-663] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab', protocol='range'}
01:33:03.215 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_1-9-dd9be8bf-36cc-4f02-a6b9-a47f74e2e70f', protocol='range'}
01:33:03.216 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_1-0])
01:33:03.216 [virtual-664] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Adding newly assigned partitions: [t5_1-0]
01:33:03.216 [virtual-663] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_2-0])
01:33:03.216 [virtual-663] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Adding newly assigned partitions: [t5_2-0]
01:33:03.217 [virtual-663] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Found no committed offset for partition t5_2-0
01:33:03.217 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Found no committed offset for partition t5_1-0
01:33:03.218 [virtual-663] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Resetting offset for partition t5_2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
01:33:03.220 [virtual-664] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Resetting offset for partition t5_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
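At this point the group is fully formed: two classic-protocol consumers in group g5_1 are subscribed to t5_1 and t5_2, the range assignor gave each member the single partition of its topic, and since no offsets were committed, auto.offset.reset = earliest resets both to offset 0. The consumers are driven by ox-kafka; a sketch of the subscribing side based on the documented ox-kafka API (group and topic names are from the log, the processing is illustrative, and exact signatures may differ in this RC):
----
import ox.kafka.{ConsumerSettings, KafkaFlow}
import ox.kafka.ConsumerSettings.AutoOffsetReset

val settings = ConsumerSettings
  .default("g5_1")                           // group.id = g5_1
  .bootstrapServers("localhost:6001")
  .autoOffsetReset(AutoOffsetReset.Earliest) // auto.offset.reset = earliest

// Each subscribe creates one KafkaConsumer (consumer-g5_1-8 / -9 above); the
// poll loop runs on a fork behind an actor, as the stack traces below show.
KafkaFlow.subscribe(settings, "t5_1").runForeach(msg => println(msg.value))
----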
01:33:05.233 [virtual-662] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:05.233 [virtual-664] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:05.233 [virtual-668] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
    at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
    at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
    at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
    at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:05.234 [virtual-674] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-15] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:05.234 [virtual-673] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Revoke previously assigned partitions [t5_1-0]
01:33:05.234 [virtual-673] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Member consumer-g5_1-9-dd9be8bf-36cc-4f02-a6b9-a47f74e2e70f sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:05.233 [virtual-663] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
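These ERROR entries are teardown noise rather than test failures: when the test body completes, the enclosing ox scope interrupts the forks that run the poll loop and the actor wrapping the KafkaConsumer, so poll throws Kafka's InterruptException and the ActorRef.ask call is interrupted while blocked on CompletableFuture.get. A minimal sketch of the mechanism, with sleep standing in for the blocking poll:
----
import ox.{fork, sleep, supervised}
import scala.concurrent.duration.*

supervised {
  fork {
    try sleep(1.hour) // stands in for the blocking KafkaConsumer.poll
    catch case _: InterruptedException => () // what ox-kafka logs as ERROR before shutdown proceeds
  }
  sleep(100.millis) // the scope body ends here; the daemon fork above is interrupted
}
----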
01:33:05.234 [virtual-673] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:05.234 [virtual-673] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
01:33:05.235 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-9-dd9be8bf-36cc-4f02-a6b9-a47f74e2e70f has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:05.235 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g5_1-9-dd9be8bf-36cc-4f02-a6b9-a47f74e2e70f) members.).
01:33:05.235 [virtual-675] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Revoke previously assigned partitions [t5_2-0]
01:33:05.235 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Member consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:05.236 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:05.236 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
01:33:05.237 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-8-18ca6816-2e30-4b42-b75b-2f0448617dab has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:05.237 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_1 with generation 2 is now empty.
01:33:05.237 [virtual-674] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:05.237 [virtual-674] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:05.237 [virtual-674] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:05.237 [virtual-674] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:05.237 [virtual-674] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-15 unregistered
01:33:05.243 [virtual-675] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:05.243 [virtual-675] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:05.243 [virtual-675] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:05.243 [virtual-675] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:05.245 [virtual-675] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-8 unregistered
01:33:05.731 [virtual-673] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:05.731 [virtual-673] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:05.731 [virtual-673] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:05.731 [virtual-673] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:05.732 [virtual-673] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-9 unregistered
01:33:05.733 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-16
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

01:33:05.733 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:05.733 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-16] Instantiated an idempotent producer.
01:33:05.735 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:05.735 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:05.735 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832385735
01:33:05.737 [kafka-producer-network-thread | producer-16] INFO o.a.k.c.Metadata - [Producer clientId=producer-16] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:05.737 [kafka-producer-network-thread | producer-16] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-16] ProducerId set to 15 with epoch 0
01:33:05.744 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-16] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:05.746 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:05.746 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:05.746 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:05.746 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:05.746 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-16 unregistered
01:33:05.748 [virtual-677] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_1-10
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:33:05.748 [virtual-677] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:05.750 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:05.750 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:05.750 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832385750
01:33:05.751 [virtual-680] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Subscribed to topic(s): t5_1
01:33:05.754 [virtual-680] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:05.754 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:33:05.755 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] (Re-)joining group
01:33:05.757 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in Empty state. Created a new member id consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da and requesting the member to rejoin with this id.
01:33:05.757 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da
01:33:05.757 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] (Re-)joining group
01:33:05.758 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da joins group g5_1 in Empty state. Adding to the group now.
01:33:05.758 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da).
01:33:08.758 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_1 generation 3 with 1 members.
01:33:08.759 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da', protocol='range'}
01:33:08.759 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Finished assignment for group at generation 3: {consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da=Assignment(partitions=[t5_1-0])}
01:33:08.760 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da for group g5_1 for generation 3. The group has 1 members, 0 of which are static.
01:33:08.766 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da', protocol='range'}
01:33:08.767 [virtual-680] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_1-0])
01:33:08.767 [virtual-680] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Adding newly assigned partitions: [t5_1-0]
01:33:08.768 [virtual-680] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t5_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
01:33:08.772 [virtual-677] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    (values identical to the consumer-g5_1-10 ConsumerConfig dump above, except client.id = consumer-g5_2-11 and group.id = g5_2)

01:33:08.773 [virtual-677] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:08.775 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:08.775 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:08.775 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832388775
01:33:08.776 [virtual-684] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Subscribed to topic(s): t5_1
01:33:08.779 [virtual-684] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:08.779 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:33:08.780 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] (Re-)joining group
01:33:08.782 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_2 in Empty state. Created a new member id consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447 and requesting the member to rejoin with this id.
01:33:08.783 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Request joining group due to: need to re-join with the given member-id: consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447
01:33:08.783 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] (Re-)joining group
01:33:08.783 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447 joins group g5_2 in Empty state. Adding to the group now.
01:33:08.784 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447 with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447).
01:33:11.784 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_2 generation 1 with 1 members.
01:33:11.784 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447', protocol='range'}
01:33:11.785 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Finished assignment for group at generation 1: {consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447=Assignment(partitions=[t5_1-0])}
01:33:11.785 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447 for group g5_2 for generation 1. The group has 1 members, 0 of which are static.
01:33:11.791 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447', protocol='range'}
01:33:11.791 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Notifying assignor about the new Assignment(partitions=[t5_1-0])
01:33:11.791 [virtual-684] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Adding newly assigned partitions: [t5_1-0]
01:33:11.792 [virtual-684] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Found no committed offset for partition t5_1-0
01:33:11.795 [virtual-684] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Resetting offset for partition t5_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
01:33:11.797 [virtual-679] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:11.798 [virtual-684] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:11.797 [virtual-683] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:11.798 [virtual-680] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:11.798 [virtual-686] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Revoke previously assigned partitions [t5_1-0]
01:33:11.798 [virtual-686] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Member consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:11.799 [virtual-686] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:11.799 [virtual-686] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Request joining group due to: consumer pro-actively leaving the group
01:33:11.799 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_2] Member consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:11.799 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g5_2-11-8a776109-de27-49b5-bcc8-4b274fb2a447) members.).
01:33:11.799 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_2 with generation 2 is now empty.
01:33:11.799 [virtual-687] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Revoke previously assigned partitions [t5_1-0]
01:33:11.799 [virtual-687] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Member consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:11.800 [virtual-687] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:11.800 [virtual-687] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
01:33:11.800 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:11.800 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g5_1-10-fe96921f-fd4d-4108-8806-7f9815c707da) members.).
01:33:11.800 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_1 with generation 4 is now empty.
01:33:12.284 [virtual-687] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:12.284 [virtual-687] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:12.284 [virtual-687] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:12.284 [virtual-687] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:12.286 [virtual-687] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-10 unregistered
01:33:12.299 [virtual-686] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:12.299 [virtual-686] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:12.299 [virtual-686] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:12.299 [virtual-686] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:12.300 [virtual-686] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_2-11 unregistered
01:33:12.302 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (values identical to the producer-16 ProducerConfig dump above, except client.id = producer-17)

01:33:12.302 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:12.302 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-17] Instantiated an idempotent producer.
01:33:12.304 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:12.304 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:12.304 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832392304
01:33:12.306 [data-plane-kafka-request-handler-3] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t6_1) to the active controller.
01:33:12.307 [kafka-producer-network-thread | producer-17] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-17] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t6_1=UNKNOWN_TOPIC_OR_PARTITION}
01:33:12.308 [kafka-producer-network-thread | producer-17] INFO o.a.k.c.Metadata - [Producer clientId=producer-17] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:12.308 [kafka-producer-network-thread | producer-17] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-17] ProducerId set to 16 with epoch 0
01:33:12.307 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t6_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
01:33:12.308 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t6_1 with topic ID MAWQIrsBTlCNdJ3-k3deEw.
01:33:12.308 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t6_1-0 with topic ID MAWQIrsBTlCNdJ3-k3deEw and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
01:33:12.335 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
01:33:12.335 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t6_1-0)
01:33:12.335 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t6_1-0 with topic id MAWQIrsBTlCNdJ3-k3deEw.
01:33:12.338 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t6_1-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
01:33:12.338 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t6_1-0 in /tmp/kafka-logs15769196062054598040/t6_1-0 with properties {}
01:33:12.339 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t6_1-0 broker=0] No checkpointed highwatermark is found for partition t6_1-0
01:33:12.339 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t6_1-0 broker=0] Log loaded for partition t6_1-0 with initial high watermark 0
01:33:12.339 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t6_1-0 with topic id Some(MAWQIrsBTlCNdJ3-k3deEw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
01:33:13.319 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-17] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:13.320 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:13.320 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:13.320 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:13.320 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:13.321 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-17 unregistered
01:33:13.321 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (values identical to the producer-16 ProducerConfig dump above, except client.id = producer-18)

01:33:13.321 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:13.321 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-18] Instantiated an idempotent producer.
01:33:13.323 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:13.323 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:13.323 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832393323
01:33:13.326 [kafka-producer-network-thread | producer-18] INFO o.a.k.c.Metadata - [Producer clientId=producer-18] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:13.326 [kafka-producer-network-thread | producer-18] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-18] ProducerId set to 17 with epoch 0
01:33:13.334 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-18] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:13.335 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:13.335 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:13.336 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:13.336 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:13.336 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-18 unregistered
01:33:13.336 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (ProducerConfig dump repeated for producer-19; identical to the preceding producer configuration except client.id = producer-19. Elided.)
7926
792701:33:13.336 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
792801:33:13.337 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-19] Instantiated an idempotent producer.
792901:33:13.338 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
793001:33:13.340 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
793101:33:13.340 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832393338
793201:33:13.341 [kafka-producer-network-thread | producer-19] INFO o.a.k.c.Metadata - [Producer clientId=producer-19] Cluster ID: 4oa31apqQtabsfPXH-H0RA
793301:33:13.342 [kafka-producer-network-thread | producer-19] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-19] ProducerId set to 18 with epoch 0
793401:33:13.354 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-19] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
793501:33:13.356 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
793601:33:13.356 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
793701:33:13.356 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
793801:33:13.356 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
793901:33:13.356 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-19 unregistered
794001:33:13.359 [virtual-693] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
7941 allow.auto.create.topics = true
7942 auto.commit.interval.ms = 5000
7943 auto.offset.reset = earliest
7944 bootstrap.servers = [localhost:6001]
7945 check.crcs = true
7946 client.dns.lookup = use_all_dns_ips
7947 client.id = consumer-g6_1-12
7948 client.rack =
7949 connections.max.idle.ms = 540000
7950 default.api.timeout.ms = 60000
7951 enable.auto.commit = false
7952 enable.metrics.push = true
7953 exclude.internal.topics = true
7954 fetch.max.bytes = 52428800
7955 fetch.max.wait.ms = 500
7956 fetch.min.bytes = 1
7957 group.id = g6_1
7958 group.instance.id = null
7959 group.protocol = classic
7960 group.remote.assignor = null
7961 heartbeat.interval.ms = 3000
7962 interceptor.classes = []
7963 internal.leave.group.on.close = true
7964 internal.throw.on.fetch.stable.offset.unsupported = false
7965 isolation.level = read_uncommitted
7966 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
7967 max.partition.fetch.bytes = 1048576
7968 max.poll.interval.ms = 300000
7969 max.poll.records = 500
7970 metadata.max.age.ms = 300000
7971 metadata.recovery.rebootstrap.trigger.ms = 300000
7972 metadata.recovery.strategy = rebootstrap
7973 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
7974 metrics.num.samples = 2
7975 metrics.recording.level = INFO
7976 metrics.sample.window.ms = 30000
7977 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
7978 receive.buffer.bytes = 65536
7979 reconnect.backoff.max.ms = 1000
7980 reconnect.backoff.ms = 50
7981 request.timeout.ms = 30000
7982 retry.backoff.max.ms = 1000
7983 retry.backoff.ms = 100
7984 sasl.client.callback.handler.class = null
7985 sasl.jaas.config = null
7986 sasl.kerberos.kinit.cmd = /usr/bin/kinit
7987 sasl.kerberos.min.time.before.relogin = 60000
7988 sasl.kerberos.service.name = null
7989 sasl.kerberos.ticket.renew.jitter = 0.05
7990 sasl.kerberos.ticket.renew.window.factor = 0.8
7991 sasl.login.callback.handler.class = null
7992 sasl.login.class = null
7993 sasl.login.connect.timeout.ms = null
7994 sasl.login.read.timeout.ms = null
7995 sasl.login.refresh.buffer.seconds = 300
7996 sasl.login.refresh.min.period.seconds = 60
7997 sasl.login.refresh.window.factor = 0.8
7998 sasl.login.refresh.window.jitter = 0.05
7999 sasl.login.retry.backoff.max.ms = 10000
8000 sasl.login.retry.backoff.ms = 100
8001 sasl.mechanism = GSSAPI
8002 sasl.oauthbearer.assertion.algorithm = RS256
8003 sasl.oauthbearer.assertion.claim.aud = null
8004 sasl.oauthbearer.assertion.claim.exp.seconds = 300
8005 sasl.oauthbearer.assertion.claim.iss = null
8006 sasl.oauthbearer.assertion.claim.jti.include = false
8007 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
8008 sasl.oauthbearer.assertion.claim.sub = null
8009 sasl.oauthbearer.assertion.file = null
8010 sasl.oauthbearer.assertion.private.key.file = null
8011 sasl.oauthbearer.assertion.private.key.passphrase = null
8012 sasl.oauthbearer.assertion.template.file = null
8013 sasl.oauthbearer.client.credentials.client.id = null
8014 sasl.oauthbearer.client.credentials.client.secret = null
8015 sasl.oauthbearer.clock.skew.seconds = 30
8016 sasl.oauthbearer.expected.audience = null
8017 sasl.oauthbearer.expected.issuer = null
8018 sasl.oauthbearer.header.urlencode = false
8019 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
8020 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
8021 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
8022 sasl.oauthbearer.jwks.endpoint.url = null
8023 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
8024 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
8025 sasl.oauthbearer.scope = null
8026 sasl.oauthbearer.scope.claim.name = scope
8027 sasl.oauthbearer.sub.claim.name = sub
8028 sasl.oauthbearer.token.endpoint.url = null
8029 security.protocol = PLAINTEXT
8030 security.providers = null
8031 send.buffer.bytes = 131072
8032 session.timeout.ms = 45000
8033 share.acknowledgement.mode = implicit
8034 socket.connection.setup.timeout.max.ms = 30000
8035 socket.connection.setup.timeout.ms = 10000
8036 ssl.cipher.suites = null
8037 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
8038 ssl.endpoint.identification.algorithm = https
8039 ssl.engine.factory.class = null
8040 ssl.key.password = null
8041 ssl.keymanager.algorithm = SunX509
8042 ssl.keystore.certificate.chain = null
8043 ssl.keystore.key = null
8044 ssl.keystore.location = null
8045 ssl.keystore.password = null
8046 ssl.keystore.type = JKS
8047 ssl.protocol = TLSv1.3
8048 ssl.provider = null
8049 ssl.secure.random.implementation = null
8050 ssl.trustmanager.algorithm = PKIX
8051 ssl.truststore.certificates = null
8052 ssl.truststore.location = null
8053 ssl.truststore.password = null
8054 ssl.truststore.type = JKS
8055 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
8056
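Editor's note: the consumer settings above, reduced to the values that matter for this test (group g6_1, manual commits, earliest reset, String deserializers), expressed against the plain Apache Kafka client. A sketch, not ox test code; the topic t6_1 comes from the logs below.

    import java.time.Duration
    import java.util.Properties
    import org.apache.kafka.clients.consumer.KafkaConsumer
    import org.apache.kafka.common.serialization.StringDeserializer
    import scala.jdk.CollectionConverters.*

    val props = new Properties()
    props.put("bootstrap.servers", "localhost:6001")
    props.put("group.id", "g6_1")              // matches the dump above
    props.put("enable.auto.commit", "false")   // offsets are committed manually
    props.put("auto.offset.reset", "earliest") // start from offset 0 when no commit exists
    props.put("key.deserializer", classOf[StringDeserializer].getName)
    props.put("value.deserializer", classOf[StringDeserializer].getName)

    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(java.util.List.of("t6_1"))
    consumer.poll(Duration.ofSeconds(2)).asScala
      .foreach(r => println(s"${r.offset}: ${r.value}"))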
805701:33:13.359 [virtual-693] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
805801:33:13.361 [virtual-693] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
805901:33:13.361 [virtual-693] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
806001:33:13.361 [virtual-693] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832393361
806101:33:13.363 [virtual-694] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Subscribed to topic(s): t6_1
806201:33:13.366 [virtual-694] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
806301:33:13.366 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
806401:33:13.367 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] (Re-)joining group
806501:33:13.369 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_1 in Empty state. Created a new member id consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b and requesting the member to rejoin with this id.
806601:33:13.369 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Request joining group due to: need to re-join with the given member-id: consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b
806701:33:13.369 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] (Re-)joining group
806801:33:13.369 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b joins group g6_1 in Empty state. Adding to the group now.
806901:33:13.369 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b).
807001:33:16.369 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_1 generation 1 with 1 members.
807101:33:16.370 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b', protocol='range'}
807201:33:16.370 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Finished assignment for group at generation 1: {consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b=Assignment(partitions=[t6_1-0])}
807301:33:16.371 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b for group g6_1 for generation 1. The group has 1 members, 0 of which are static.
807401:33:16.377 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b', protocol='range'}
807501:33:16.377 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Notifying assignor about the new Assignment(partitions=[t6_1-0])
807601:33:16.377 [virtual-694] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Adding newly assigned partitions: [t6_1-0]
807701:33:16.378 [virtual-694] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Found no committed offset for partition t6_1-0
807801:33:16.381 [virtual-694] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Resetting offset for partition t6_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
807901:33:18.384 [virtual-694] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
8080java.lang.InterruptedException: null
8081 ... 18 common frames omitted
8082Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
8083 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
8084 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
8085 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
8086 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
8087 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
8088 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
8089 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
8090 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
8091 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
8092 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
8093 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
8094 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
8095 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
8096 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
8097 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
8098 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8099 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
8100 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
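Editor's note: the frames above show the shape of ox's consumer handling: the KafkaConsumer (which is not thread-safe) lives inside an ox.channels.Actor, and each poll is routed through ActorRef.ask (KafkaConsumerWrapper.poll invoked from the loop started by Actor.create). A minimal sketch of that actor pattern, assuming the Actor.create/ask API as it appears in these frames and using a hypothetical stand-in class instead of a real consumer:

    import ox.supervised
    import ox.channels.Actor

    // Hypothetical stand-in for a single-threaded resource such as a KafkaConsumer
    class Counter:
      private var n = 0
      def next(): Int = { n += 1; n }

    supervised {
      val ref = Actor.create(Counter())
      // ask() runs next() on the actor's own virtual thread and waits for the result,
      // so the mutable state is only ever touched from one thread
      println(ref.ask(_.next()))
      println(ref.ask(_.next()))
    }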
810101:33:18.384 [virtual-696] ERROR o.k.KafkaFlow$ - Exception when polling for records
8102java.lang.InterruptedException: null
8103 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
8104 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
8105 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
8106 at ox.channels.ActorRef.ask(actor.scala:64)
8107 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
8108 at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
8109 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
8110 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
8111 at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
8112 at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
8113 at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
8114 at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
8115 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8116 at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
8117 at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
8118 at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
8119 at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
8120 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8121 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
8122 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
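Editor's note: the InterruptedException here is expected shutdown noise rather than a test failure: when the test's supervised scope ends, ox interrupts the scope's forks (virtual threads), and a consumer blocked in poll surfaces that as Kafka's InterruptException. A minimal sketch of that scoping behaviour, assuming ox's documented supervised/fork/sleep API:

    import ox.{fork, sleep, supervised}
    import scala.concurrent.duration.*

    supervised {
      fork {
        // stands in for a blocking consumer.poll(...)
        try sleep(1.minute)
        catch case _: InterruptedException => println("interrupted at scope end")
      }
      sleep(100.millis)
      // when the scope's main body completes, outstanding forks are interrupted
    }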
812301:33:18.385 [virtual-701] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Revoke previously assigned partitions [t6_1-0]
812401:33:18.385 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Member consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
812501:33:18.385 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Resetting generation and member id due to: consumer pro-actively leaving the group
812601:33:18.385 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Request joining group due to: consumer pro-actively leaving the group
812701:33:18.386 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_1] Member consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
812801:33:18.386 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g6_1-12-b9bf6f6c-aecc-4277-a24e-8dd570b4dd9b) members.).
812901:33:18.386 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_1 with generation 2 is now empty.
813001:33:18.396 [virtual-701] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
813101:33:18.396 [virtual-701] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
813201:33:18.396 [virtual-701] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
813301:33:18.396 [virtual-701] INFO o.a.k.c.m.Metrics - Metrics reporters closed
813401:33:18.398 [virtual-701] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_1-12 unregistered
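Editor's note: the "Revoke previously assigned partitions" / "Adding newly assigned partitions" lines in this close sequence are emitted when the client invokes the registered rebalance callbacks. A minimal sketch of wiring such a listener with the plain Kafka client (names illustrative, reusing the consumer from the earlier sketch):

    import java.util.Collection
    import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
    import org.apache.kafka.common.TopicPartition

    val listener = new ConsumerRebalanceListener:
      def onPartitionsAssigned(parts: Collection[TopicPartition]): Unit =
        println(s"assigned: $parts")
      def onPartitionsRevoked(parts: Collection[TopicPartition]): Unit =
        println(s"revoked: $parts")

    consumer.subscribe(java.util.List.of("t6_1"), listener)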
813501:33:18.398 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (ProducerConfig dump repeated for producer-20; identical to the producer-19 configuration except client.id = producer-20. Elided.)
8248
824901:33:18.399 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
825001:33:18.399 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-20] Instantiated an idempotent producer.
825101:33:18.402 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
825201:33:18.402 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
825301:33:18.402 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832398401
825401:33:18.405 [kafka-producer-network-thread | producer-20] INFO o.a.k.c.Metadata - [Producer clientId=producer-20] Cluster ID: 4oa31apqQtabsfPXH-H0RA
825501:33:18.405 [kafka-producer-network-thread | producer-20] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-20] ProducerId set to 19 with epoch 0
825601:33:18.413 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-20] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
825701:33:18.415 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
825801:33:18.415 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
825901:33:18.415 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
826001:33:18.415 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
826101:33:18.416 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-20 unregistered
826201:33:18.417 [virtual-703] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    (ConsumerConfig dump repeated for consumer-g6_1-13; identical to the consumer-g6_1-12 configuration above except client.id = consumer-g6_1-13. Elided.)
8378
837901:33:18.417 [virtual-703] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
838001:33:18.419 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
838101:33:18.420 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
838201:33:18.420 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832398419
838301:33:18.420 [virtual-706] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Subscribed to topic(s): t6_1
838401:33:18.423 [virtual-706] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
838501:33:18.424 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
838601:33:18.424 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] (Re-)joining group
838701:33:18.426 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_1 in Empty state. Created a new member id consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88 and requesting the member to rejoin with this id.
838801:33:18.426 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Request joining group due to: need to re-join with the given member-id: consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88
838901:33:18.427 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] (Re-)joining group
839001:33:18.427 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88 joins group g6_1 in Empty state. Adding to the group now.
839101:33:18.427 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88).
839201:33:20.941 [quorum-controller-0-event-handler] INFO o.a.k.c.EventPerformanceMonitor - [QuorumController id=0] In the last 60000 ms period, 353 controller events were completed, which took an average of 9.98 ms each. The slowest event was completeActivation[1](2065106643), which took 34.10 ms.
839301:33:21.427 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_1 generation 3 with 1 members.
839401:33:21.428 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88', protocol='range'}
839501:33:21.428 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Finished assignment for group at generation 3: {consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88=Assignment(partitions=[t6_1-0])}
839601:33:21.429 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88 for group g6_1 for generation 3. The group has 1 members, 0 of which are static.
839701:33:21.435 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88', protocol='range'}
839801:33:21.435 [virtual-706] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Notifying assignor about the new Assignment(partitions=[t6_1-0])
839901:33:21.435 [virtual-706] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Adding newly assigned partitions: [t6_1-0]
840001:33:21.436 [virtual-706] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t6_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
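Editor's note: "Setting offset for partition t6_1-0 to the committed offset FetchPosition{offset=3, ...}" above shows the group resuming from a manually committed offset, consistent with enable.auto.commit = false in the config dump. With the plain client, such a commit would look like this (reusing the consumer from the earlier sketch):

    import org.apache.kafka.clients.consumer.OffsetAndMetadata
    import org.apache.kafka.common.TopicPartition

    // commits offset 3 for t6_1-0, i.e. "the next record to read is offset 3"
    consumer.commitSync(
      java.util.Map.of(new TopicPartition("t6_1", 0), new OffsetAndMetadata(3L))
    )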
840101:33:21.439 [virtual-703] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    (ConsumerConfig dump repeated for consumer-g6_2-14; identical to the consumer-g6_1-12 configuration above except client.id = consumer-g6_2-14 and group.id = g6_2. Elided.)
8517
851801:33:21.439 [virtual-703] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
851901:33:21.441 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
852001:33:21.441 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
852101:33:21.441 [virtual-703] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832401441
852201:33:21.442 [virtual-710] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Subscribed to topic(s): t6_1
852301:33:21.445 [virtual-710] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Cluster ID: 4oa31apqQtabsfPXH-H0RA
852401:33:21.445 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
852501:33:21.446 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] (Re-)joining group
852601:33:21.448 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_2 in Empty state. Created a new member id consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893 and requesting the member to rejoin with this id.
852701:33:21.448 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Request joining group due to: need to re-join with the given member-id: consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893
852801:33:21.448 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] (Re-)joining group
852901:33:21.449 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893 joins group g6_2 in Empty state. Adding to the group now.
853001:33:21.449 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893).
853101:33:24.448 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_2 generation 1 with 1 members.
853201:33:24.449 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893', protocol='range'}
853301:33:24.449 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Finished assignment for group at generation 1: {consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893=Assignment(partitions=[t6_1-0])}
853401:33:24.450 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893 for group g6_2 for generation 1. The group has 1 members, 0 of which are static.
853501:33:24.456 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893', protocol='range'}
853601:33:24.456 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Notifying assignor about the new Assignment(partitions=[t6_1-0])
853701:33:24.456 [virtual-710] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Adding newly assigned partitions: [t6_1-0]
853801:33:24.457 [virtual-710] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Found no committed offset for partition t6_1-0
853901:33:24.460 [virtual-710] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Resetting offset for partition t6_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
854001:33:24.462 [virtual-709] ERROR o.k.KafkaFlow$ - Exception when polling for records
8541java.lang.InterruptedException: null
8542 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
8543 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
8544 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
8545 at ox.channels.ActorRef.ask(actor.scala:64)
8546 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
8547 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
8548 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
8549 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
8550 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
8551 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
8552 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
8553 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8554 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
8555 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
855601:33:24.462 [virtual-705] ERROR o.k.KafkaFlow$ - Exception when polling for records
8557java.lang.InterruptedException: null
8558 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
8559 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
8560 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
8561 at ox.channels.ActorRef.ask(actor.scala:64)
8562 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
8563 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
8564 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
8565 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
8566 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
8567 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
8568 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
8569 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8570 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
8571 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
857201:33:24.462 [virtual-710] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
8573java.lang.InterruptedException: null
8574 ... 18 common frames omitted
8575Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
8576 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
8577 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
8578 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
8579 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
8580 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
8581 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
8582 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
8583 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
8584 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
8585 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
8586 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
8587 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
8588 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
8589 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
8590 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
8591 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8592 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
8593 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
859401:33:24.462 [virtual-706] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
8595java.lang.InterruptedException: null
8596 ... 18 common frames omitted
8597Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
8598 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
8599 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
8600 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
8601 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
8602 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
8603 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
8604 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
8605 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
8606 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
8607 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
8608 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
8609 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
8610 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
8611 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
8612 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
8613 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8614 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:24.462 [virtual-712] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Revoke previously assigned partitions [t6_1-0]
01:33:24.462 [virtual-712] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Member consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:24.463 [virtual-712] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:24.463 [virtual-712] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Request joining group due to: consumer pro-actively leaving the group
01:33:24.463 [virtual-713] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Revoke previously assigned partitions [t6_1-0]
01:33:24.463 [virtual-713] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Member consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:24.463 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_2] Member consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:24.463 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g6_2-14-1a2c7ecb-62c4-4914-bf58-9e2859bac893) members.).
01:33:24.463 [virtual-713] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:24.463 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_2 with generation 2 is now empty.
01:33:24.463 [virtual-713] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Request joining group due to: consumer pro-actively leaving the group
01:33:24.464 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_1] Member consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:24.464 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g6_1-13-4e9203b6-9282-48bf-948a-13512c976c88) members.).
01:33:24.464 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_1 with generation 4 is now empty.
01:33:24.950 [virtual-713] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:24.950 [virtual-713] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:24.951 [virtual-713] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:24.951 [virtual-713] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:24.952 [virtual-713] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_1-13 unregistered
01:33:24.963 [virtual-712] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:24.963 [virtual-712] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:24.963 [virtual-712] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:24.963 [virtual-712] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:24.965 [virtual-712] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_2-14 unregistered
01:33:24.966 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-21
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

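----
For reference, the dump above describes an idempotent String/String producer pointed at the embedded broker on localhost:6001. A minimal sketch of an equivalent setup using the plain Kafka client API (illustrative only, not the project's test code; the key and value are placeholders):

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
props.put(ProducerConfig.LINGER_MS_CONFIG, "5")        // linger.ms = 5, as in the dump
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000") // max.block.ms = 10000, as in the dump

val producer = new KafkaProducer[String, String](props)
// t7_1 is the topic auto-created in the broker log below; send(...).get() blocks until acked
try producer.send(new ProducerRecord[String, String]("t7_1", "key", "value")).get()
finally producer.close()
----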
01:33:24.967 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:24.967 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-21] Instantiated an idempotent producer.
01:33:24.968 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:24.968 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:24.968 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832404968
01:33:24.971 [data-plane-kafka-request-handler-4] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t7_1) to the active controller.
01:33:24.971 [kafka-producer-network-thread | producer-21] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-21] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t7_1=UNKNOWN_TOPIC_OR_PARTITION}
01:33:24.971 [kafka-producer-network-thread | producer-21] INFO o.a.k.c.Metadata - [Producer clientId=producer-21] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:24.971 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t7_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
01:33:24.972 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t7_1 with topic ID Eep2-JTpRaK0SpxAFmjsyQ.
01:33:24.972 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t7_1-0 with topic ID Eep2-JTpRaK0SpxAFmjsyQ and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
01:33:24.973 [kafka-producer-network-thread | producer-21] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-21] ProducerId set to 20 with epoch 0
01:33:24.987 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
01:33:24.987 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t7_1-0)
01:33:24.987 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t7_1-0 with topic id Eep2-JTpRaK0SpxAFmjsyQ.
01:33:24.990 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t7_1-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
01:33:24.990 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t7_1-0 in /tmp/kafka-logs15769196062054598040/t7_1-0 with properties {}
01:33:24.990 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t7_1-0 broker=0] No checkpointed highwatermark is found for partition t7_1-0
01:33:24.990 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t7_1-0 broker=0] Log loaded for partition t7_1-0 with initial high watermark 0
01:33:24.991 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t7_1-0 with topic id Some(Eep2-JTpRaK0SpxAFmjsyQ) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
01:33:25.981 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-21] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:25.982 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:25.982 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:25.982 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:25.982 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:25.983 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-21 unregistered
01:33:25.983 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	(same values as the producer-21 dump above, except client.id = producer-22)

01:33:25.984 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:25.984 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-22] Instantiated an idempotent producer.
01:33:25.985 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:25.986 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:25.986 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832405985
01:33:25.989 [kafka-producer-network-thread | producer-22] INFO o.a.k.c.Metadata - [Producer clientId=producer-22] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:25.989 [kafka-producer-network-thread | producer-22] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-22] ProducerId set to 21 with epoch 0
01:33:25.998 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-22] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:25.999 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:25.999 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:25.999 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:25.999 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:25.999 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-22 unregistered
01:33:26.000 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	(same values as the producer-21 dump above, except client.id = producer-23)

01:33:26.000 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:26.000 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-23] Instantiated an idempotent producer.
01:33:26.002 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:26.002 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:26.002 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832406002
01:33:26.005 [kafka-producer-network-thread | producer-23] INFO o.a.k.c.Metadata - [Producer clientId=producer-23] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:26.005 [kafka-producer-network-thread | producer-23] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-23] ProducerId set to 22 with epoch 0
01:33:26.013 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-23] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:26.014 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:26.014 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:26.014 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:26.014 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:26.015 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-23 unregistered
01:33:26.016 [virtual-719] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g7_1-15
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g7_1
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:33:26.016 [virtual-719] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:26.018 [virtual-719] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:26.018 [virtual-719] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:26.018 [virtual-719] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832406018
01:33:26.019 [virtual-720] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Subscribed to topic(s): t7_1
01:33:26.021 [virtual-720] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:26.022 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:33:26.023 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] (Re-)joining group
01:33:26.025 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_1 in Empty state. Created a new member id consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8 and requesting the member to rejoin with this id.
01:33:26.025 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Request joining group due to: need to re-join with the given member-id: consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8
01:33:26.025 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] (Re-)joining group
01:33:26.026 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8 joins group g7_1 in Empty state. Adding to the group now.
01:33:26.026 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8 with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8).
01:33:29.027 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_1 generation 1 with 1 members.
01:33:29.027 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8', protocol='range'}
01:33:29.028 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Finished assignment for group at generation 1: {consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8=Assignment(partitions=[t7_1-0])}
01:33:29.028 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8 for group g7_1 for generation 1. The group has 1 members, 0 of which are static.
01:33:29.034 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8', protocol='range'}
01:33:29.035 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Notifying assignor about the new Assignment(partitions=[t7_1-0])
01:33:29.035 [virtual-720] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Adding newly assigned partitions: [t7_1-0]
01:33:29.036 [virtual-720] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Found no committed offset for partition t7_1-0
01:33:29.037 [virtual-720] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Resetting offset for partition t7_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
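----
The consumer above is created through ox-kafka: the stack traces that follow pass through ox.kafka.KafkaFlow and ox.kafka.KafkaConsumerWrapper, which poll the consumer from an actor running on a virtual thread. A minimal sketch of that entry point, assuming the ConsumerSettings/KafkaFlow API as described in the ox-kafka docs, with the group id, offset-reset policy and broker address taken from this log:

import ox.kafka.{ConsumerSettings, KafkaFlow}
import ox.kafka.ConsumerSettings.AutoOffsetReset

val settings = ConsumerSettings
  .default("g7_1")                           // group.id, as in the dump above
  .bootstrapServers("localhost:6001")
  .autoOffsetReset(AutoOffsetReset.Earliest) // auto.offset.reset = earliest

// each element is a received message; take(3) just so the sketch terminates
KafkaFlow.subscribe(settings, "t7_1").take(3).runForeach(m => println(m.value))
----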
01:33:31.039 [virtual-720] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:31.039 [virtual-722] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
	at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
	at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
	at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
	at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
	at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
	at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
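----
The two ERROR entries above are shutdown noise rather than assertion failures: when the enclosing ox scope ends, the virtual threads running the poll actor are interrupted, the Kafka client resurfaces the InterruptedException as org.apache.kafka.common.errors.InterruptException, and ox-kafka logs it while tearing down. A minimal sketch of the mechanism using ox's public supervised/fork API (illustrative only; the sleep stands in for the blocking KafkaConsumer.poll call seen in the trace):

import ox.*

supervised {
  fork {
    // a blocking loop on a virtual thread, like the consumer-polling actor above
    while true do Thread.sleep(100)
  }
  Thread.sleep(500)
  // the main body ends here; the scope interrupts the still-running fork,
  // and the InterruptedException surfaces inside the forked block
}
----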
01:33:31.040 [virtual-727] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Revoke previously assigned partitions [t7_1-0]
01:33:31.040 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Member consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:31.041 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:31.041 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Request joining group due to: consumer pro-actively leaving the group
01:33:31.041 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_1] Member consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:31.042 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g7_1-15-e76e7a0c-eca3-4d9a-8ddf-1d8886702ea8) members.).
01:33:31.042 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_1 with generation 2 is now empty.
01:33:31.050 [virtual-727] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:31.050 [virtual-727] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:31.050 [virtual-727] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:31.050 [virtual-727] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:31.052 [virtual-727] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_1-15 unregistered
01:33:31.053 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	(same values as the producer-21 dump above, except client.id = producer-24)

01:33:31.053 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:31.053 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-24] Instantiated an idempotent producer.
01:33:31.054 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:31.054 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:31.054 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832411054
01:33:31.057 [kafka-producer-network-thread | producer-24] INFO o.a.k.c.Metadata - [Producer clientId=producer-24] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:31.057 [kafka-producer-network-thread | producer-24] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-24] ProducerId set to 23 with epoch 0
01:33:31.066 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-24] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:31.067 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:31.067 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:31.067 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:31.067 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:31.067 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-24 unregistered
01:33:31.069 [virtual-729] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	(same values as the consumer-g7_1-15 dump above, except client.id = consumer-g7_1-16)

01:33:31.070 [virtual-729] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:31.072 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:31.072 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:31.072 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832411072
01:33:31.072 [virtual-732] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Subscribed to topic(s): t7_1
01:33:31.074 [virtual-732] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:31.075 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:33:31.076 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] (Re-)joining group
01:33:31.077 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_1 in Empty state. Created a new member id consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc and requesting the member to rejoin with this id.
01:33:31.077 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Request joining group due to: need to re-join with the given member-id: consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc
01:33:31.077 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] (Re-)joining group
01:33:31.078 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc joins group g7_1 in Empty state. Adding to the group now.
01:33:31.078 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc).
01:33:34.078 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_1 generation 3 with 1 members.
01:33:34.079 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc', protocol='range'}
01:33:34.079 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Finished assignment for group at generation 3: {consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc=Assignment(partitions=[t7_1-0])}
01:33:34.080 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc for group g7_1 for generation 3. The group has 1 members, 0 of which are static.
01:33:34.085 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc', protocol='range'}
01:33:34.085 [virtual-732] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Notifying assignor about the new Assignment(partitions=[t7_1-0])
01:33:34.085 [virtual-732] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Adding newly assigned partitions: [t7_1-0]
01:33:34.086 [virtual-732] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t7_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
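----
Note that consumer-g7_1-16 starts at the committed offset 3, whereas its predecessor in group g7_1 had to reset to offset 0: the first member found no committed offset, and the position it committed before leaving is what the rejoining member resumes from. A plain-Kafka sketch of the commit that produces such a stored position (illustrative only, not the project's test code):

import java.time.Duration
import java.util.Properties
import scala.jdk.CollectionConverters.*
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer

val props = new Properties()
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g7_1")
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // as in the dumps above
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")   // commits are explicit
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)

val consumer = new KafkaConsumer[String, String](props)
try
  consumer.subscribe(List("t7_1").asJava)
  consumer.poll(Duration.ofSeconds(1)) // fetch a batch of records
  consumer.commitSync()                // store the position; a rejoining member starts here
finally consumer.close()
----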
01:33:34.090 [virtual-729] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g7_2-17
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g7_2
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

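For context, the ConsumerConfig dump above corresponds to the consumer that ox-kafka builds for group g7_2: embedded broker at localhost:6001, earliest offset reset, auto-commit disabled, String deserializers. Below is a minimal sketch of how such a consumer is typically set up and drained via the library's flow API, assuming the ConsumerSettings and KafkaFlow entry points as documented for ox-kafka (the builder method names are assumptions taken from the docs, not this test's verbatim code):

    import ox.kafka.*
    import ox.kafka.ConsumerSettings.AutoOffsetReset

    // Mirrors the non-default entries of the dump above (assumed builder methods):
    val settings = ConsumerSettings
      .default("g7_2")                           // group.id = g7_2
      .bootstrapServers("localhost:6001")        // bootstrap.servers
      .autoOffsetReset(AutoOffsetReset.Earliest) // auto.offset.reset = earliest

    // KafkaFlow.subscribe is the entry point visible in the stack traces further
    // down; it yields a flow of received messages, drained here for illustration.
    KafkaFlow
      .subscribe(settings, "t7_1")
      .take(3)
      .runForeach(msg => println(msg.value))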
01:33:34.090 [virtual-729] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:34.092 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:34.092 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:34.092 [virtual-729] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832414092
01:33:34.093 [virtual-736] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Subscribed to topic(s): t7_1
01:33:34.096 [virtual-736] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:34.096 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:33:34.097 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] (Re-)joining group
01:33:34.099 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_2 in Empty state. Created a new member id consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a and requesting the member to rejoin with this id.
01:33:34.099 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Request joining group due to: need to re-join with the given member-id: consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a
01:33:34.100 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] (Re-)joining group
01:33:34.100 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a joins group g7_2 in Empty state. Adding to the group now.
01:33:34.101 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a).
01:33:37.101 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_2 generation 1 with 1 members.
01:33:37.101 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a', protocol='range'}
01:33:37.101 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Finished assignment for group at generation 1: {consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a=Assignment(partitions=[t7_1-0])}
01:33:37.102 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a for group g7_2 for generation 1. The group has 1 members, 0 of which are static.
01:33:37.108 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a', protocol='range'}
01:33:37.109 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Notifying assignor about the new Assignment(partitions=[t7_1-0])
01:33:37.109 [virtual-736] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Adding newly assigned partitions: [t7_1-0]
01:33:37.109 [virtual-736] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Found no committed offset for partition t7_1-0
01:33:37.111 [virtual-736] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Resetting offset for partition t7_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
01:33:37.113 [virtual-731] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:37.113 [virtual-732] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:37.113 [virtual-736] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:37.113 [virtual-735] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
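The four ERROR entries above are the shutdown pattern of the consumer forks rather than a test failure: when the enclosing ox scope closes, the virtual thread blocked in KafkaConsumer.poll is interrupted and the client rethrows this as InterruptException (the KafkaConsumerWrapper$ traces), while the fork blocked in ActorRef.ask sees the corresponding InterruptedException (the KafkaFlow$ traces). A self-contained sketch of the same mechanics using the ox primitives visible in the traces; the Poller trait and timings are hypothetical, and a daemon fork is used here so that the scope's end triggers the interruption:

    import scala.concurrent.duration.*
    import ox.*
    import ox.channels.*

    // Hypothetical stand-in for the consumer owned by the actor; the real code
    // calls KafkaConsumer.poll(...) here, which throws InterruptException when
    // its thread is interrupted.
    trait Poller:
      def poll(): Unit

    supervised {
      val ref: ActorRef[Poller] = Actor.create(new Poller {
        def poll(): Unit = Thread.sleep(1000) // blocks, like KafkaConsumer.poll
      })
      fork {
        // blocks in CompletableFuture.get inside ask, as in the traces above
        while true do ref.ask(_.poll())
      }
      sleep(100.millis)
      // the scope's main body ends here; the daemon fork and the actor thread
      // are interrupted, reproducing the pair of exceptions logged above
    }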
01:33:37.114 [virtual-739] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Revoke previously assigned partitions [t7_1-0]
01:33:37.114 [virtual-738] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Revoke previously assigned partitions [t7_1-0]
01:33:37.115 [virtual-738] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Member consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:37.115 [virtual-739] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Member consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:37.115 [virtual-739] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:37.115 [virtual-739] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Request joining group due to: consumer pro-actively leaving the group
01:33:37.115 [virtual-738] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:37.115 [virtual-738] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Request joining group due to: consumer pro-actively leaving the group
01:33:37.115 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_2] Member consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:37.115 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g7_2-17-f3a3218a-7d4e-481b-ba61-a64e0513ed6a) members.).
01:33:37.115 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_2 with generation 2 is now empty.
01:33:37.116 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_1] Member consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:37.116 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g7_1-16-373bfc36-07ca-48dc-91de-69e00ff73adc) members.).
01:33:37.116 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_1 with generation 4 is now empty.
01:33:37.601 [virtual-738] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:37.601 [virtual-738] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:37.601 [virtual-738] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:37.601 [virtual-738] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:37.602 [virtual-738] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_1-16 unregistered
01:33:37.615 [virtual-739] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:37.615 [virtual-739] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:37.615 [virtual-739] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:37.615 [virtual-739] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:37.616 [virtual-739] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_2-17 unregistered
01:33:37.618 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-25
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

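This producer (and the four near-identical ones that follow) is idempotent, with acks = -1 and effectively unlimited retries; the only values changed from the client defaults are bootstrap.servers, max.block.ms (10000), linger.ms (5) and retry.backoff.ms (1000). For reference, an equivalent producer can be recreated with the plain Kafka client as below (a sketch with hypothetical key/value payloads, not the test's code):

    import java.util.Properties
    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
    import org.apache.kafka.common.serialization.StringSerializer

    // Only the non-default entries from the dump above are set explicitly;
    // enable.idempotence = true and acks = -1 are already the client defaults.
    val props = new Properties()
    props.put("bootstrap.servers", "localhost:6001")
    props.put("max.block.ms", "10000")
    props.put("linger.ms", "5")
    props.put("retry.backoff.ms", "1000")

    val producer = new KafkaProducer(props, new StringSerializer, new StringSerializer)
    try producer.send(new ProducerRecord("t8_1", "key", "value")).get() // block until acked
    finally producer.close()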
01:33:37.618 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:37.619 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-25] Instantiated an idempotent producer.
01:33:37.620 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:37.620 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:37.620 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832417620
01:33:37.622 [data-plane-kafka-request-handler-2] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t8_1) to the active controller.
01:33:37.623 [kafka-producer-network-thread | producer-25] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-25] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t8_1=UNKNOWN_TOPIC_OR_PARTITION}
01:33:37.623 [kafka-producer-network-thread | producer-25] INFO o.a.k.c.Metadata - [Producer clientId=producer-25] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:37.624 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t8_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
01:33:37.624 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t8_1 with topic ID yDhRka4mRnW84FLjv7IepA.
01:33:37.624 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t8_1-0 with topic ID yDhRka4mRnW84FLjv7IepA and PartitionRegistration(replicas=[0], directories=[lLBEpjnhyLJNV8aO2iBtcg], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
01:33:37.625 [kafka-producer-network-thread | producer-25] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-25] ProducerId set to 24 with epoch 0
01:33:37.650 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
01:33:37.651 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t8_1-0)
01:33:37.651 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t8_1-0 with topic id yDhRka4mRnW84FLjv7IepA.
01:33:37.653 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t8_1-0, dir=/tmp/kafka-logs15769196062054598040] Loading producer state till offset 0
01:33:37.653 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t8_1-0 in /tmp/kafka-logs15769196062054598040/t8_1-0 with properties {}
01:33:37.654 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t8_1-0 broker=0] No checkpointed highwatermark is found for partition t8_1-0
01:33:37.654 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t8_1-0 broker=0] Log loaded for partition t8_1-0 with initial high watermark 0
01:33:37.654 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t8_1-0 with topic id Some(yDhRka4mRnW84FLjv7IepA) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
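The WARN above is benign: the producer's first metadata request races with auto-creation of t8_1, so the broker briefly reports UNKNOWN_TOPIC_OR_PARTITION before the controller finishes creating the topic (1 partition, replication factor 1) and the broker becomes its leader. A test that wants to avoid this transient warning could create the topic explicitly before producing, e.g. with the Kafka admin client (a sketch, not this suite's code):

    import java.util.Properties
    import org.apache.kafka.clients.admin.{Admin, NewTopic}

    val props = new Properties()
    props.put("bootstrap.servers", "localhost:6001")

    val admin = Admin.create(props)
    try
      // same shape as the auto-created topic: 1 partition, replication factor 1
      admin.createTopics(java.util.List.of(new NewTopic("t8_1", 1, 1.toShort))).all().get()
    finally admin.close()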
01:33:38.634 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-25] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:38.636 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:38.636 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:38.636 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:38.636 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:38.636 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-25 unregistered
01:33:38.636 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    [identical to the producer-25 ProducerConfig above, except client.id = producer-26]

01:33:38.637 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:38.637 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-26] Instantiated an idempotent producer.
01:33:38.638 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:38.638 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:38.638 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832418638
01:33:38.641 [kafka-producer-network-thread | producer-26] INFO o.a.k.c.Metadata - [Producer clientId=producer-26] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:38.641 [kafka-producer-network-thread | producer-26] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-26] ProducerId set to 25 with epoch 0
01:33:38.649 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-26] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:38.650 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:38.650 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:38.650 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:38.650 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:38.651 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-26 unregistered
01:33:38.652 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    [identical to the producer-25 ProducerConfig above, except client.id = producer-27]

01:33:38.652 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:38.652 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-27] Instantiated an idempotent producer.
01:33:38.654 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:38.655 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:38.655 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832418654
01:33:38.657 [kafka-producer-network-thread | producer-27] INFO o.a.k.c.Metadata - [Producer clientId=producer-27] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:38.657 [kafka-producer-network-thread | producer-27] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-27] ProducerId set to 26 with epoch 0
01:33:38.665 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-27] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:38.666 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:38.667 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:38.667 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:38.667 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:38.667 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-27 unregistered
01:33:38.668 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    [identical to the producer-25 ProducerConfig above, except client.id = producer-28]

01:33:38.668 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:38.668 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-28] Instantiated an idempotent producer.
01:33:38.670 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:38.670 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:38.670 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832418670
01:33:38.672 [kafka-producer-network-thread | producer-28] INFO o.a.k.c.Metadata - [Producer clientId=producer-28] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:38.672 [kafka-producer-network-thread | producer-28] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-28] ProducerId set to 27 with epoch 0
01:33:38.679 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-28] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:38.681 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:38.681 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:38.681 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:38.681 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:38.681 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-28 unregistered
01:33:38.681 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    [identical to the producer-25 ProducerConfig above, except client.id = producer-29]

01:33:38.681 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:38.682 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-29] Instantiated an idempotent producer.
01:33:38.683 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:38.684 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:38.684 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832418683
01:33:38.686 [kafka-producer-network-thread | producer-29] INFO o.a.k.c.Metadata - [Producer clientId=producer-29] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:38.686 [kafka-producer-network-thread | producer-29] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-29] ProducerId set to 28 with epoch 0
01:33:38.695 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-29] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
01:33:38.697 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:38.697 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:38.697 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:38.697 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:38.697 [pool-67-thread-9-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-29 unregistered
01:33:38.698 [virtual-745] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_1-18
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:33:38.698 [virtual-745] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:38.701 [virtual-745] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:38.701 [virtual-745] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:38.701 [virtual-745] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832418701
01:33:38.702 [virtual-746] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Subscribed to topic(s): t8_1
01:33:38.704 [virtual-746] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:38.705 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:33:38.705 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] (Re-)joining group
01:33:38.707 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_1 in Empty state. Created a new member id consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70 and requesting the member to rejoin with this id.
01:33:38.707 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Request joining group due to: need to re-join with the given member-id: consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70
01:33:38.708 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] (Re-)joining group
01:33:38.708 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70 joins group g8_1 in Empty state. Adding to the group now.
01:33:38.709 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70 with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70).
01:33:41.708 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_1 generation 1 with 1 members.
01:33:41.709 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70', protocol='range'}
01:33:41.709 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Finished assignment for group at generation 1: {consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70=Assignment(partitions=[t8_1-0])}
01:33:41.710 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70 for group g8_1 for generation 1. The group has 1 members, 0 of which are static.
01:33:41.716 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70', protocol='range'}
01:33:41.716 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Notifying assignor about the new Assignment(partitions=[t8_1-0])
01:33:41.716 [virtual-746] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Adding newly assigned partitions: [t8_1-0]
01:33:41.717 [virtual-746] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Found no committed offset for partition t8_1-0
01:33:41.719 [virtual-746] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Resetting offset for partition t8_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
01:33:41.721 [virtual-748] ERROR o.k.KafkaFlow$ - Exception when polling for records
ox.flow.FlowOps$$anon$1: abort take
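The ERROR above is expected test behaviour rather than a failure: ox's Flow.take completes the flow once the requested number of elements has been emitted and tears down the upstream with an internal control exception ("abort take"), which the Kafka polling fork logs before its scope unwinds. A minimal sketch of the same early-completion behaviour on an in-memory flow, assuming ox's Flow API and involving no Kafka at all:

import ox.flow.Flow

@main def takeDemo(): Unit =
  // take(2) completes the flow after two elements; emission of 3 and 4 is
  // aborted upstream, analogously to the polling loop in the test above.
  val firstTwo = Flow.fromValues(1, 2, 3, 4).take(2).runToList()
  println(firstTwo) // List(1, 2)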
01:33:41.728 [virtual-753] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Revoke previously assigned partitions [t8_1-0]
01:33:41.728 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Member consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:41.729 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:41.729 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Request joining group due to: consumer pro-actively leaving the group
01:33:41.729 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_1] Member consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:41.729 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g8_1-18-e17bfa1c-3357-4a56-88e9-9ed4e3e9ad70) members.).
01:33:41.729 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_1 with generation 2 is now empty.
01:33:42.222 [virtual-753] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:42.223 [virtual-753] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:42.223 [virtual-753] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:42.223 [virtual-753] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:42.224 [virtual-753] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_1-18 unregistered
01:33:42.225 [virtual-754] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_1-19
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:33:42.225 [virtual-754] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:42.227 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:42.227 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:42.227 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832422227
01:33:42.227 [virtual-757] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Subscribed to topic(s): t8_1
01:33:42.230 [virtual-757] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:42.230 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:33:42.231 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] (Re-)joining group
01:33:42.232 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_1 in Empty state. Created a new member id consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997 and requesting the member to rejoin with this id.
01:33:42.232 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Request joining group due to: need to re-join with the given member-id: consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997
01:33:42.233 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] (Re-)joining group
01:33:42.233 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997 joins group g8_1 in Empty state. Adding to the group now.
01:33:42.233 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997 with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997).
01:33:45.234 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_1 generation 3 with 1 members.
01:33:45.235 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997', protocol='range'}
01:33:45.235 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Finished assignment for group at generation 3: {consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997=Assignment(partitions=[t8_1-0])}
01:33:45.236 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997 for group g8_1 for generation 3. The group has 1 members, 0 of which are static.
01:33:45.242 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997', protocol='range'}
01:33:45.242 [virtual-757] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Notifying assignor about the new Assignment(partitions=[t8_1-0])
01:33:45.242 [virtual-757] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Adding newly assigned partitions: [t8_1-0]
01:33:45.243 [virtual-757] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t8_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
01:33:45.247 [virtual-754] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_2-20
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_2
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

01:33:45.247 [virtual-754] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
01:33:45.249 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
01:33:45.249 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
01:33:45.249 [virtual-754] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1767832425249
01:33:45.249 [virtual-761] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Subscribed to topic(s): t8_1
01:33:45.251 [virtual-761] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Cluster ID: 4oa31apqQtabsfPXH-H0RA
01:33:45.252 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
01:33:45.253 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] (Re-)joining group
01:33:45.254 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_2 in Empty state. Created a new member id consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8 and requesting the member to rejoin with this id.
01:33:45.255 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Request joining group due to: need to re-join with the given member-id: consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8
01:33:45.255 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] (Re-)joining group
01:33:45.255 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8 joins group g8_2 in Empty state. Adding to the group now.
01:33:45.255 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8 with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8).
01:33:48.256 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_2 generation 1 with 1 members.
01:33:48.256 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8', protocol='range'}
01:33:48.256 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Finished assignment for group at generation 1: {consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8=Assignment(partitions=[t8_1-0])}
01:33:48.257 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8 for group g8_2 for generation 1. The group has 1 members, 0 of which are static.
01:33:48.264 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8', protocol='range'}
01:33:48.264 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Notifying assignor about the new Assignment(partitions=[t8_1-0])
01:33:48.264 [virtual-761] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Adding newly assigned partitions: [t8_1-0]
01:33:48.265 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Found no committed offset for partition t8_1-0
01:33:48.267 [virtual-761] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Resetting offset for partition t8_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
01:33:48.269 [virtual-760] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:48.269 [virtual-756] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:48.270 [virtual-757] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
01:33:48.270 [virtual-761] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
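The InterruptedException/InterruptException traces above are shutdown noise rather than failures: when the enclosing ox scope ends, the virtual threads blocked in KafkaConsumer.poll are interrupted, and the Kafka client rethrows the interruption as org.apache.kafka.common.errors.InterruptException. A sketch of a poll loop that treats that exception as an orderly stop signal, using the plain Kafka consumer API with settings mirroring the dumps above (the printing body is illustrative only):

import java.time.Duration
import java.util.Properties
import scala.jdk.CollectionConverters.*
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.errors.InterruptException

def pollUntilInterrupted(): Unit =
  val props = new Properties()
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001") // from the dumps above
  props.put(ConsumerConfig.GROUP_ID_CONFIG, "g8_1")
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
  val consumer = new KafkaConsumer[String, String](props)
  try
    consumer.subscribe(java.util.List.of("t8_1"))
    while true do
      for r <- consumer.poll(Duration.ofMillis(100)).asScala do println(s"${r.key} -> ${r.value}")
  catch
    // poll rethrows thread interruption as InterruptException; treat it as an
    // orderly shutdown request, which is what the test's teardown relies on.
    case _: InterruptException => ()
  finally consumer.close() // produces the LeaveGroup / "unregistered" lines seen above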
01:33:48.270 [virtual-763] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Revoke previously assigned partitions [t8_1-0]
01:33:48.270 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Member consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:48.270 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:48.270 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Request joining group due to: consumer pro-actively leaving the group
01:33:48.271 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_1] Member consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:48.271 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g8_1-19-58e9448f-a2e4-4695-82ab-c93e33cc9997) members.).
01:33:48.271 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_1 with generation 4 is now empty.
01:33:48.271 [virtual-764] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Revoke previously assigned partitions [t8_1-0]
01:33:48.271 [virtual-764] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Member consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
01:33:48.272 [virtual-764] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Resetting generation and member id due to: consumer pro-actively leaving the group
01:33:48.272 [virtual-764] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Request joining group due to: consumer pro-actively leaving the group
01:33:48.272 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_2] Member consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
01:33:48.272 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g8_2-20-896ebcdb-d5e4-4a1f-b2d7-312c930d98a8) members.).
01:33:48.272 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_2 with generation 2 is now empty.
01:33:48.756 [virtual-763] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:48.756 [virtual-763] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:48.756 [virtual-763] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:48.756 [virtual-763] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:48.757 [virtual-763] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_1-19 unregistered
01:33:48.770 [virtual-764] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
01:33:48.770 [virtual-764] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
01:33:48.770 [virtual-764] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
01:33:48.770 [virtual-764] INFO o.a.k.c.m.Metrics - Metrics reporters closed
01:33:48.771 [virtual-764] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_2-20 unregistered
01:33:48.774 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from STARTED to SHUTTING_DOWN
01:33:48.775 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] shutting down
01:33:48.776 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Beginning controlled shutdown.
01:33:48.777 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] Unfenced broker 0 has requested and been granted a controlled shutdown.
01:33:48.781 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] enterControlledShutdown[0]: changing 11 partition(s)
01:33:48.781 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=0, inControlledShutdown=1, logDirs=[])
01:33:48.808 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker is in PENDING_CONTROLLED_SHUTDOWN state, still waiting for the active controller.
01:33:48.808 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 11 partition(s) to local followers.
01:33:48.813 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t6_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.813 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t5_2-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.813 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower __consumer_offsets-0 starts at leader epoch 1 from offset 1056 with partition epoch 1 and high watermark 1056. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.813 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t5_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.813 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t4-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.813 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t8_1-0 starts at leader epoch 1 from offset 5 with partition epoch 1 and high watermark 5. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.813 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t7_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.814 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t3_2-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.814 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t3_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.814 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.814 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t2-0 starts at leader epoch 1 from offset 1000 with partition epoch 1 and high watermark 1000. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
01:33:48.815 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions HashSet(t2-0, t6_1-0, t3_1-0, t3_2-0, t8_1-0, t5_2-0, t1-0, __consumer_offsets-0, t5_1-0, t7_1-0, t4-0)
01:33:48.815 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] Removed fetcher for partitions HashSet(t2-0, t6_1-0, t3_1-0, t3_2-0, t8_1-0, t5_2-0, t1-0, __consumer_offsets-0, t5_1-0, t7_1-0, t4-0)
01:33:48.816 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Stopped fetchers as part of controlled shutdown for 11 partitions
01:33:48.817 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Scheduling unloading of metadata for __consumer_offsets-0 with epoch OptionalInt[1]
01:33:48.817 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Started unloading metadata for __consumer_offsets-0 with epoch OptionalInt[1].
01:33:48.817 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g8_1] Unloading group metadata for generation 4.
01:33:48.817 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g3_2] Unloading group metadata for generation 2.
01:33:48.817 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g7_1] Unloading group metadata for generation 4.
01:33:48.817 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g6_1] Unloading group metadata for generation 4.
01:33:48.818 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g5_1] Unloading group metadata for generation 4.
01:33:48.818 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g3_1] Unloading group metadata for generation 4.
01:33:48.818 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g1] Unloading group metadata for generation 2.
01:33:48.818 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g8_2] Unloading group metadata for generation 2.
01:33:48.818 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g7_2] Unloading group metadata for generation 2.
01:33:48.818 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g6_2] Unloading group metadata for generation 2.
01:33:48.818 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=embedded-kafka-spec] Unloading group metadata for generation 4.
01:33:48.818 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g5_2] Unloading group metadata for generation 2.
01:33:48.818 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Finished unloading metadata for __consumer_offsets-0 with epoch OptionalInt[1].
01:33:48.859 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] The request from broker 0 to shut down has been granted since the lowest active offset 9223372036854775807 is now greater than the broker's controlled shutdown offset 219.
01:33:48.860 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=1, inControlledShutdown=0, logDirs=[])
01:33:48.887 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The controller has asked us to exit controlled shutdown.
01:33:48.887 [broker-0-lifecycle-manager-event-handler] INFO o.a.k.q.KafkaEventQueue - [BrokerLifecycleManager id=0] beginShutdown: shutting down event queue.
01:33:48.887 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Transitioning from PENDING_CONTROLLED_SHUTDOWN to SHUTTING_DOWN.
01:33:48.887 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Shutting down
01:33:48.888 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Stopped
01:33:48.888 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Shutdown completed
01:33:48.888 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Stopping socket server request processors
01:33:48.890 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for heartbeat shutdown
01:33:48.891 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Stopped socket server request processors
01:33:48.892 [pool-67-thread-9] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Broker 0] shutting down
01:33:48.893 [pool-67-thread-9] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Broker 0] shut down completely
01:33:48.893 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutting down
01:33:48.893 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Stopped
01:33:48.894 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutdown completed
01:33:48.895 [pool-67-thread-9] INFO k.s.KafkaApis - [KafkaApi-0] Shutdown complete.
01:33:48.896 [pool-67-thread-9] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Shutting down.
01:33:48.897 [pool-67-thread-9] INFO k.c.t.TransactionStateManager - [Transaction State Manager 0]: Shutdown complete
01:33:48.897 [pool-67-thread-9] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Shutting down
01:33:48.897 [TxnMarkerSenderThread-0] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Stopped
01:33:48.897 [pool-67-thread-9] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Shutdown completed
01:33:48.898 [pool-67-thread-9] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Shutdown complete.
01:33:48.898 [pool-67-thread-9] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Shutting down.
01:33:48.898 [pool-67-thread-9] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Closing coordinator runtime.
01:33:48.899 [pool-67-thread-9] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Shutting down
01:33:48.899 [group-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Stopped
01:33:48.899 [pool-67-thread-9] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Shutdown completed
01:33:48.899 [pool-67-thread-9] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [GroupCoordinator id=0] Shutting down event processor.
01:33:48.900 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Shutting down. Draining the remaining events.
01:33:48.900 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Shutting down. Draining the remaining events.
01:33:48.900 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Shutdown completed
01:33:48.900 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Shutting down. Draining the remaining events.
01:33:48.900 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Shutdown completed
01:33:48.900 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Shutting down. Draining the remaining events.
01:33:48.900 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Shutdown completed
01:33:48.900 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Shutdown completed
01:33:48.900 [pool-67-thread-9] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [GroupCoordinator id=0] Event processor closed.
01:33:48.901 [pool-67-thread-9] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Coordinator runtime closed.
01:33:48.902 [pool-67-thread-9] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Shutdown complete.
01:33:48.902 [pool-67-thread-9] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Shutting down.
01:33:48.902 [pool-67-thread-9] INFO o.a.k.c.c.r.CoordinatorRuntime - [ShareCoordinator id=0] Closing coordinator runtime.
01:33:48.902 [pool-67-thread-9] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Shutting down
01:33:48.902 [share-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Stopped
01:33:48.902 [pool-67-thread-9] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Shutdown completed
01:33:48.903 [pool-67-thread-9] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [ShareCoordinator id=0] Shutting down event processor.
01:33:48.903 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Shutting down. Draining the remaining events.
01:33:48.903 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Shutdown completed
01:33:48.903 [pool-67-thread-9] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [ShareCoordinator id=0] Event processor closed.
01:33:48.903 [pool-67-thread-9] INFO o.a.k.c.c.r.CoordinatorRuntime - [ShareCoordinator id=0] Coordinator runtime closed.
01:33:48.904 [pool-67-thread-9] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Shutdown complete.
01:33:48.904 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [AssignmentsManager id=0]KafkaEventQueue#close: shutting down event queue.
01:33:48.904 [broker-0-directory-assignments-manager-event-handler] INFO o.a.k.s.AssignmentsManager - [AssignmentsManager id=0] shutting down.
01:33:48.904 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Shutting down
01:33:48.905 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Stopped
01:33:48.905 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Shutdown completed
01:33:48.905 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for directory-assignments shutdown
01:33:48.905 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [AssignmentsManager id=0]closed event queue.
01:33:48.906 [pool-67-thread-9] INFO k.s.ReplicaManager - [ReplicaManager broker=0] Shutting down
01:33:48.906 [pool-67-thread-9] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Shutting down
01:33:48.906 [LogDirFailureHandler] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Stopped
01:33:48.907 [pool-67-thread-9] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Shutdown completed
01:33:48.907 [pool-67-thread-9] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] shutting down
01:33:48.908 [pool-67-thread-9] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] shutdown completed
01:33:48.909 [pool-67-thread-9] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] shutting down
01:33:48.909 [pool-67-thread-9] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] shutdown completed
01:33:48.909 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Shutting down
1101501:33:48.910 [ExpirationReaper-0-Fetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Stopped
1101601:33:48.910 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Shutdown completed
1101701:33:48.910 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Shutting down
1101801:33:48.911 [ExpirationReaper-0-RemoteFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Stopped
1101901:33:48.911 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Shutdown completed
1102001:33:48.912 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Shutting down
1102101:33:48.912 [ExpirationReaper-0-RemoteListOffsets] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Stopped
1102201:33:48.912 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Shutdown completed
1102301:33:48.913 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Shutting down
1102401:33:48.913 [ExpirationReaper-0-Produce] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Stopped
1102501:33:48.914 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Shutdown completed
1102601:33:48.914 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Shutting down
1102701:33:48.915 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Shutdown completed
1102801:33:48.915 [ExpirationReaper-0-DeleteRecords] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Stopped
1102901:33:48.915 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Shutting down
1103001:33:48.916 [ExpirationReaper-0-ShareFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Stopped
1103101:33:48.916 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Shutdown completed
1103201:33:48.919 [pool-67-thread-9] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Shutting down
1103301:33:48.920 [AddPartitionsToTxnSenderThread-0] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Stopped
1103401:33:48.920 [pool-67-thread-9] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Shutdown completed
1103501:33:48.920 [pool-67-thread-9] INFO k.s.ReplicaManager - [ReplicaManager broker=0] Shut down completely
1103601:33:48.920 [pool-67-thread-9] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Shutting down
1103701:33:48.921 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Stopped
1103801:33:48.921 [pool-67-thread-9] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Shutdown completed
1103901:33:48.921 [pool-67-thread-9] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for alter-partition shutdown
1104001:33:48.922 [pool-67-thread-9] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Shutting down
1104101:33:48.922 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Stopped
1104201:33:48.923 [pool-67-thread-9] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Shutdown completed
1104301:33:48.923 [pool-67-thread-9] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for forwarding shutdown
1104401:33:48.923 [pool-67-thread-9] INFO k.l.LogManager - Shutting down.
1104501:33:48.924 [pool-67-thread-9] INFO o.a.k.s.i.l.LogCleaner - Shutting down the log cleaner.
1104601:33:48.924 [pool-67-thread-9] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Shutting down
1104701:33:48.925 [kafka-log-cleaner-thread-0] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Stopped
1104801:33:48.925 [pool-67-thread-9] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Shutdown completed
1104901:33:48.930 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t3_2-0] Wrote producer snapshot at offset 3 with 1 producer ids in 2 ms.
1105001:33:48.930 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t5_2-0] Wrote producer snapshot at offset 3 with 1 producer ids in 2 ms.
1105101:33:48.933 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t2-0] Wrote producer snapshot at offset 1000 with 1 producer ids in 1 ms.
1105201:33:48.933 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=__consumer_offsets-0] Wrote producer snapshot at offset 1056 with 0 producer ids in 1 ms.
1105301:33:48.935 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t3_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
1105401:33:48.935 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t5_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
1105501:33:48.936 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t7_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 0 ms.
1105601:33:48.936 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t8_1-0] Wrote producer snapshot at offset 5 with 5 producer ids in 1 ms.
1105701:33:48.937 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t4-0] Wrote producer snapshot at offset 3 with 1 producer ids in 1 ms.
1105801:33:48.937 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
1105901:33:48.939 [log-closing-/tmp/kafka-logs15769196062054598040] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t6_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 0 ms.
1106001:33:48.972 [pool-67-thread-9] INFO k.l.LogManager - Shutdown complete.
1106101:33:48.972 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Shutting down
1106201:33:48.973 [broker-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Stopped
1106301:33:48.973 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Shutdown completed
1106401:33:48.973 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Shutting down
1106501:33:48.973 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Shutdown completed
1106601:33:48.973 [broker-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Stopped
1106701:33:48.973 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Shutting down
1106801:33:48.974 [broker-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Stopped
1106901:33:48.975 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Shutdown completed
1107001:33:48.975 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Shutting down
1107101:33:48.975 [broker-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Stopped
1107201:33:48.975 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Shutdown completed
1107301:33:48.976 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Shutting down socket server
1107401:33:48.989 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Shutdown completed
1107501:33:48.990 [pool-67-thread-9] INFO o.a.k.s.l.m.BrokerTopicStats - Broker and topic stats closed
1107601:33:48.990 [pool-67-thread-9] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Shutting down
1107701:33:48.990 [share-group-lock-timeout-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Stopped
1107801:33:48.990 [pool-67-thread-9] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Shutdown completed
1107901:33:48.991 [pool-67-thread-9] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Shutting down
1108001:33:48.992 [PersisterStateManager] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Stopped
1108101:33:48.992 [pool-67-thread-9] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Shutdown completed
1108201:33:48.992 [pool-67-thread-9] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Shutting down
1108301:33:48.992 [persister-state-manager-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Stopped
1108401:33:48.993 [pool-67-thread-9] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Shutdown completed
1108501:33:48.993 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [BrokerLifecycleManager id=0] closed event queue.
1108601:33:48.994 [pool-67-thread-9] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Shutting down
1108701:33:48.994 [client-metrics-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Stopped
1108801:33:48.994 [pool-67-thread-9] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Shutdown completed
1108901:33:48.995 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] shut down completed
1109001:33:48.995 [pool-67-thread-9] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from SHUTTING_DOWN to SHUTDOWN
1109101:33:48.995 [pool-67-thread-9] INFO k.s.ControllerServer - [ControllerServer id=0] shutting down
1109201:33:48.996 [pool-67-thread-9] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Shutting down
1109301:33:48.998 [raft-expiration-reaper] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Stopped
1109401:33:48.998 [pool-67-thread-9] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Shutdown completed
1109501:33:48.999 [pool-67-thread-9] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Shutting down
1109601:33:48.999 [pool-67-thread-9] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Beginning graceful shutdown
1109701:33:49.000 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Graceful shutdown completed
1109801:33:49.000 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [RaftManager id=0] Completed graceful shutdown of RaftClient
1109901:33:49.000 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Stopped
1110001:33:49.000 [pool-67-thread-9] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Shutdown completed
1110101:33:49.002 [pool-67-thread-9] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Shutting down
1110201:33:49.002 [kafka-0-raft-outbound-request-thread] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Stopped
1110301:33:49.002 [pool-67-thread-9] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Shutdown completed
1110401:33:49.004 [pool-67-thread-9] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=__cluster_metadata-0] Wrote producer snapshot at offset 222 with 0 producer ids in 1 ms.
1110501:33:49.006 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=E5FeePitQvWsne22hoaE1A] beginShutdown: shutting down event queue.
1110601:33:49.007 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=E5FeePitQvWsne22hoaE1A] shutting down.
1110701:33:49.007 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutting down
1110801:33:49.007 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Stopped
1110901:33:49.007 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutdown completed
1111001:33:49.008 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for registration shutdown
1111101:33:49.009 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=E5FeePitQvWsne22hoaE1A] closed event queue.
1111201:33:49.009 [pool-67-thread-9] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutdown completed
1111301:33:49.009 [pool-67-thread-9] WARN o.a.k.c.NetworkClient - [NodeToControllerChannelManager id=0 name=registration] Attempting to close NetworkClient that has already been closed.
1111401:33:49.009 [pool-67-thread-9] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for registration shutdown
1111501:33:49.011 [kafka-0-metadata-loader-event-handler] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=E5FeePitQvWsne22hoaE1A] closed event queue.
1111601:33:49.012 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Stopping socket server request processors
1111701:33:49.015 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Stopped socket server request processors
1111801:33:49.015 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [QuorumController id=0] QuorumController#beginShutdown: shutting down event queue.
1111901:33:49.015 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Shutting down socket server
1112001:33:49.016 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] writeNoOpRecord: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112101:33:49.016 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] maybeFenceStaleBroker: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112201:33:49.016 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] generatePeriodicPerformanceMessage: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112301:33:49.016 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] electPreferred: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112401:33:49.016 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] electUnclean: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112501:33:49.016 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] expireDelegationTokens: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112601:33:49.022 [pool-67-thread-9] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Shutdown completed
1112701:33:49.023 [pool-67-thread-9] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Controller 0] shutting down
1112801:33:49.024 [pool-67-thread-9] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Controller 0] shut down completely
1112901:33:49.025 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutting down
1113001:33:49.026 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Stopped
1113101:33:49.026 [pool-67-thread-9] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutdown completed
1113201:33:49.028 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Shutting down
1113301:33:49.028 [controller-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Stopped
1113401:33:49.029 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Shutdown completed
1113501:33:49.029 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Shutting down
1113601:33:49.030 [controller-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Stopped
1113701:33:49.030 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Shutdown completed
1113801:33:49.030 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Shutting down
1113901:33:49.030 [controller-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Stopped
1114001:33:49.030 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Shutdown completed
1114101:33:49.030 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Shutting down
1114201:33:49.031 [pool-67-thread-9] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Shutdown completed
1114301:33:49.031 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [QuorumController id=0] closed event queue.
1114401:33:49.031 [controller-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Stopped
1114501:33:49.033 [pool-67-thread-9] INFO k.s.SharedServer - [SharedServer id=0] Stopping SharedServer
1114601:33:49.033 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [MetadataLoader id=0] beginShutdown: shutting down event queue.
1114701:33:49.033 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] beginShutdown: shutting down event queue.
1114801:33:49.034 [kafka-0-metadata-loader-event-handler] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] closed event queue.
1114901:33:49.034 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [MetadataLoader id=0] closed event queue.
1115001:33:49.034 [pool-67-thread-9] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] closed event queue.
1115101:33:49.036 [pool-67-thread-9] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
1115201:33:49.036 [pool-67-thread-9] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
1115301:33:49.036 [pool-67-thread-9] INFO o.a.k.c.m.Metrics - Metrics reporters closed
1115401:33:49.037 [pool-67-thread-9] INFO o.a.k.c.u.AppInfoParser - App info kafka.server for 0 unregistered
[info] KafkaTest:
[info] source
[info] - should receive messages from a topic
[info] stage
[info] - should publish messages to a topic
[info] stage
[info] - should commit offsets of processed messages
[info] drain
[info] - should publish messages to a topic
[info] drain
[info] - should commit offsets of processed messages
[info] drain
[info] - should commit offsets using runCommit
[info] stage
[info] - should commit offsets using mapCommit
[info] stage
[info] - should commit offsets when consuming a finite stream using take

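The KafkaTest cases above exercise the three integration points of the ox-kafka module: consuming a topic as a source, publishing through a stage, and committing offsets while draining. A minimal sketch of that shape, assuming a broker on localhost:9092; the API names (ConsumerSettings.default, KafkaFlow.subscribe, KafkaDrain.runPublish, the value field of a received message) are modelled on the ox documentation and are assumptions, not verified against this exact ox version:

  import org.apache.kafka.clients.producer.ProducerRecord
  import ox.kafka.{ConsumerSettings, KafkaDrain, KafkaFlow, ProducerSettings}
  import ox.kafka.ConsumerSettings.AutoOffsetReset
  import ox.pipe

  // Assumed settings; "group1", "t1" and "t2" are illustrative names only.
  val consumerSettings = ConsumerSettings.default("group1")
    .bootstrapServers("localhost:9092")
    .autoOffsetReset(AutoOffsetReset.Earliest)
  val producerSettings = ProducerSettings.default.bootstrapServers("localhost:9092")

  // source: receive messages from topic t1; take(4) makes the stream finite,
  // mirroring the "finite stream using take" case above.
  KafkaFlow
    .subscribe(consumerSettings, "t1")
    .take(4)
    .map(in => ProducerRecord[String, String]("t2", in.value)) // stage: re-publish to t2
    .pipe(KafkaDrain.runPublish(producerSettings))             // drain: send and block until done
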
************************
Build summary:
[{
 "module": "flow-reactive-streams",
 "compile": {"status": "ok", "tookMs": 16919, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
 "test-compile": {"status": "ok", "tookMs": 306, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
 "test": {"status": "ok", "tookMs": 224, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
 "publish": {"status": "skipped", "tookMs": 0},
 "metadata": {
 "crossScalaVersions": ["2.12.20"]
}
},{
 "module": "mdc-logback",
 "compile": {"status": "ok", "tookMs": 688, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
 "test-compile": {"status": "ok", "tookMs": 1203, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
 "test": {"status": "ok", "tookMs": 600, "passed": 1, "failed": 0, "ignored": 0, "skipped": 0, "total": 1, "byFramework": [{"framework": "unknown", "stats": {"passed": 1, "failed": 0, "ignored": 0, "skipped": 0, "total": 1}}]},
 "publish": {"status": "skipped", "tookMs": 0},
 "metadata": {
 "crossScalaVersions": ["2.12.20"]
}
},{
 "module": "core",
 "compile": {"status": "ok", "tookMs": 70, "warnings": 13, "errors": 0, "sourceVersion": "3.8"},
 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
 "test-compile": {"status": "ok", "tookMs": 22218, "warnings": 20, "errors": 0, "sourceVersion": "3.8"},
 "test": {"status": "ok", "tookMs": 150004, "passed": 795, "failed": 0, "ignored": 7, "skipped": 0, "total": 802, "byFramework": [{"framework": "unknown", "stats": {"passed": 795, "failed": 0, "ignored": 7, "skipped": 0, "total": 802}}]},
 "publish": {"status": "skipped", "tookMs": 0},
 "metadata": {
 "crossScalaVersions": ["2.12.20"]
}
},{
 "module": "cron",
 "compile": {"status": "ok", "tookMs": 417, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
 "test-compile": {"status": "ok", "tookMs": 739, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
 "test": {"status": "ok", "tookMs": 4417, "passed": 3, "failed": 0, "ignored": 0, "skipped": 0, "total": 3, "byFramework": [{"framework": "unknown", "stats": {"passed": 3, "failed": 0, "ignored": 0, "skipped": 0, "total": 3}}]},
 "publish": {"status": "skipped", "tookMs": 0},
 "metadata": {
 "crossScalaVersions": ["2.12.20"]
}
},{
 "module": "otel-context",
 "compile": {"status": "ok", "tookMs": 238, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
 "test-compile": {"status": "ok", "tookMs": 166, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
 "test": {"status": "ok", "tookMs": 170, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
 "publish": {"status": "skipped", "tookMs": 0},
 "metadata": {
 "crossScalaVersions": ["2.12.20"]
}
},{
 "module": "kafka",
 "compile": {"status": "ok", "tookMs": 1004, "warnings": 1, "errors": 0, "sourceVersion": "3.8"},
 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
 "test-compile": {"status": "ok", "tookMs": 1539, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
 "test": {"status": "ok", "tookMs": 89465, "passed": 8, "failed": 0, "ignored": 0, "skipped": 0, "total": 8, "byFramework": [{"framework": "unknown", "stats": {"passed": 8, "failed": 0, "ignored": 0, "skipped": 0, "total": 8}}]},
 "publish": {"status": "skipped", "tookMs": 0},
 "metadata": {
 "crossScalaVersions": ["2.12.20"]
}
}]
************************
[success] Total time: 296 s (0:04:56.0), completed Jan 8, 2026, 1:33:49 AM
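The build summary above is a JSON array with one object per module, each carrying compile, test-compile and test entries with a status and tookMs timing. A throwaway sanity check of that shape, assuming the array is saved to a file whose path is passed as the first argument (SummaryCheck and the plain string scanning are illustrative, not part of the build tooling):

  object SummaryCheck:
    def main(args: Array[String]): Unit =
      // Read the saved summary; no JSON library is needed for this rough check.
      val src = scala.io.Source.fromFile(args(0))
      val summary = try src.mkString finally src.close()
      val modules = "\"module\":".r.findAllIn(summary).size
      val okCompiles = "\"compile\": \\{\"status\": \"ok\"".r.findAllIn(summary).size
      println(s"modules=$modules, compile-ok=$okCompiles")
      assert(modules == okCompiles, "some module failed to compile")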
Checking patch project/plugins.sbt...
Checking patch build.sbt...
Applied patch project/plugins.sbt cleanly.
Applied patch build.sbt cleanly.