Build Logs

softwaremill/ox • 3.8.0-RC3:2025-12-04

Errors: 235
Warnings: 473
Total Lines: 11262

1##################################
2Cloning https://github.com/softwaremill/ox.git into /build/repo using revision v1.0.2
3##################################
4Note: switching to '9cb2ebc1df12e84198f24d8429f0ed135892788e'.
5
6You are in 'detached HEAD' state. You can look around, make experimental
7changes and commit them, and you can discard any commits you make in this
8state without impacting any branches by switching back to a branch.
9
10If you want to create a new branch to retain commits you create, you may
11do so (now or later) by using -c with the switch command. Example:
12
13 git switch -c <new-branch-name>
14
15Or undo this operation with:
16
17 git switch -
18
19Turn off this advice by setting config variable advice.detachedHead to false
20
21Using target Scala version for migration: 3.7.4
22Migrating project for -source:3.7 using Scala 3.7.4
23----
24Preparing build for 3.7.4
25Would try to apply common scalacOption (best-effort, sbt/mill only):
26Append: -rewrite,REQUIRE:-source:3.7-migration
27Remove: -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
28----
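
In plain sbt terms, the append/remove lists above amount to roughly the following settings adjustment (a hedged sketch of the intended effect, not the community-build plugin's actual code; `REQUIRE:` apparently marks options that must end up applied, `MATCH:` marks regex-based removals):

  // Rough sbt equivalent of the option mapping above (illustrative only):
  Compile / scalacOptions ++= Seq("-rewrite", "-source:3.7-migration")
  Compile / scalacOptions --= Seq(
    "-indent", "-no-indent", "-new-syntax",
    "-deprecation", "-feature", "-Xfatal-warnings", "-Werror"
  )
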
29Starting build for 3.7.4
30Execute tests: false
31sbt project found:
32No prepare script found for project softwaremill/ox
33##################################
34Scala version: 3.7.4
35Targets: com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context
36Project projectConfig: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}
37##################################
38Using extra scalacOptions: -rewrite,REQUIRE:-source:3.7-migration
39Filtering out scalacOptions: -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
40[sbt_options] declare -a sbt_options=()
41[process_args] java_version = '21'
42[copyRt] java9_rt = '/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21/rt.jar'
43# Executing command line:
44java
45-Dfile.encoding=UTF-8
46-Dcommunitybuild.scala=3.7.4
47-Dcommunitybuild.project.dependencies.add=
48-Xmx7G
49-Xms4G
50-Xss8M
51-Dsbt.script=/root/.sdkman/candidates/sbt/current/bin/sbt
52-Dscala.ext.dirs=/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21
53-jar
54/root/.sdkman/candidates/sbt/1.11.5/bin/sbt-launch.jar
55"setCrossScalaVersions 3.7.4"
56"++3.7.4 -v"
57"mapScalacOptions "-rewrite,REQUIRE:-source:3.7-migration,-Wconf:msg=can be rewritten automatically under:s" "-indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e""
58"set every credentials := Nil"
59"excludeLibraryDependency com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}"
60"removeScalacOptionsStartingWith -P:wartremover"
61
62moduleMappings
63"runBuild 3.7.4 """{"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}""" com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context"
64
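
Each quoted argument above is one sbt command, run in order: `++3.7.4 -v` is sbt's built-in cross Scala version switch (verbose), while `setCrossScalaVersions`, `mapScalacOptions`, `excludeLibraryDependency`, `removeScalacOptionsStartingWith`, `moduleMappings` and `runBuild` come from the community-build plugin. A comparable manual session, with the plugin commands omitted (hedged sketch):

  sbt "++3.7.4 -v" "Test/compile"
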
65[info] [launcher] getting org.scala-sbt sbt 1.11.7 (this may take some time)...
66[info] welcome to sbt 1.11.7 (Eclipse Adoptium Java 21)
67[info] loading settings for project repo-build from akka.sbt, plugins.sbt...
68[info] loading project definition from /build/repo/project
69[info] compiling 2 Scala sources to /build/repo/project/target/scala-2.12/sbt-1.0/classes ...
70[info] Non-compiled module 'compiler-bridge_2.12' for Scala 2.12.20. Compiling...
71[info] Compilation completed in 8.943s.
72[info] done compiling
73[info] loading settings for project rootProject from build.sbt...
74[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
75[info] set current project to ox (in build file:/build/repo/)
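
The recurring MiMa line above means: because the project's own version is a regular release (not a milestone or RC), binary compatibility is checked against the previous release, 1.0.1. With sbt-mima-plugin's standard key this corresponds roughly to (hedged sketch; the version-derivation logic is the build's own):

  mimaPreviousArtifacts := Set(organization.value %% name.value % "1.0.1")
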
76Execute setCrossScalaVersions: 3.7.4
77OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in flowReactiveStreams/crossScalaVersions
78OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in flowReactiveStreams/crossScalaVersions
79OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in documentation/crossScalaVersions
80OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in documentation/crossScalaVersions
81OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in rootProject/crossScalaVersions
82OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in rootProject/crossScalaVersions
83OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in kafka/crossScalaVersions
84OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in kafka/crossScalaVersions
85OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in otelContext/crossScalaVersions
86OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in otelContext/crossScalaVersions
87OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in mdcLogback/crossScalaVersions
88OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in mdcLogback/crossScalaVersions
89OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in cron/crossScalaVersions
90OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in core/crossScalaVersions
91OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in core/crossScalaVersions
92OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in cron/crossScalaVersions
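
The OpenCB lines above show the plugin forcing every subproject onto the single target version; per project, the net effect is as if the build had declared (illustrative sketch; per the log, the original values were a mix of 3.3.7 and 2.12.20 entries):

  crossScalaVersions := Seq("3.7.4")
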
93[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
94[info] set current project to ox (in build file:/build/repo/)
95[info] Setting Scala version to 3.7.4 on 8 projects.
96[info] Switching Scala version on:
97[info] flowReactiveStreams (3.7.4)
98[info] documentation (3.7.4)
99[info] cron (3.7.4)
100[info] * rootProject (3.7.4)
101[info] mdcLogback (3.7.4)
102[info] kafka (3.7.4)
103[info] core (3.7.4)
104[info] otelContext (3.7.4)
105[info] Excluding projects:
106[info] Reapplying settings...
107[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
108[info] set current project to ox (in build file:/build/repo/)
109Execute mapScalacOptions: -rewrite,REQUIRE:-source:3.7-migration,-Wconf:msg=can be rewritten automatically under:s -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
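
The appended `-Wconf` entry uses the compiler's standard warning-configuration syntax, `-Wconf:<filter>:<action>`: `msg=...` matches on message text and the action `s` silences the match, keeping the "can be rewritten automatically under" migration hints out of the warning count. As a standalone setting (hedged sketch):

  scalacOptions += "-Wconf:msg=can be rewritten automatically under:s"
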
110[info] Reapplying settings...
111[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
112[info] set current project to ox (in build file:/build/repo/)
113[info] Defining Global / credentials, core / credentials and 6 others.
114[info] The new values will be used by Compile / scalafmtOnly, Global / pgpSelectPassphrase and 63 others.
115[info] Run `last` for details.
116[info] Reapplying settings...
117[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
118[info] set current project to ox (in build file:/build/repo/)
119Execute excludeLibraryDependency: com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}
120[info] Reapplying settings...
121OpenCB::Failed to reapply settings in excludeLibraryDependency: Reference to undefined setting:
122
123 Global / allExcludeDependencies from Global / allExcludeDependencies (CommunityBuildPlugin.scala:331)
124 Did you mean flowReactiveStreams / allExcludeDependencies ?
125 , retry without global scopes
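
The failure above is handled: the plugin first tries to update `allExcludeDependencies` at `Global` scope, finds this build defines no such global key, and retries per project. The intended result matches sbt's stock exclusion mechanism (hedged sketch; `excludeDependencies` and `ExclusionRule` are standard sbt, and the `{scalaVersion}` placeholders from the command above are resolved to the `_3` suffix here purely for illustration):

  excludeDependencies ++= Seq(
    ExclusionRule("com.github.ghik", "zerowaste_3"),
    ExclusionRule("com.olegpy", "better-monadic-for_3"),
    ExclusionRule("org.polyvariant", "better-tostring_3"),
    ExclusionRule("org.wartremover", "wartremover_3")
  )
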
126[info] Reapplying settings...
127[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
128[info] set current project to ox (in build file:/build/repo/)
129Execute removeScalacOptionsStartingWith: -P:wartremover
130[info] Reapplying settings...
131[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
132[info] set current project to ox (in build file:/build/repo/)
133[success] Total time: 0 s, completed Dec 4, 2025, 2:24:05 PM
134Build config: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}
135Parsed config: Success(ProjectBuildConfig(ProjectsConfig(List(),Map()),CompileOnly,List()))
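
The `Parsed config` line names the plugin's configuration model; a hedged sketch of a shape consistent with the constructors shown (field names follow the JSON keys above, the types are assumptions for illustration only):

  sealed trait TestsMode
  case object CompileOnly extends TestsMode
  case object Full extends TestsMode

  final case class ProjectsConfig(exclude: List[String], overrides: Map[String, String])
  final case class ProjectBuildConfig(projects: ProjectsConfig, tests: TestsMode, sourcePatches: List[String])
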
136Starting build...
137Projects: Set(flowReactiveStreams, cron, mdcLogback, kafka, core, otelContext)
138Starting build for ProjectRef(file:/build/repo/,flowReactiveStreams) (flow-reactive-streams)... [0/6]
139OpenCB::Exclude Scala3 specific scalacOption `-rewrite` in Scala 2.12.20 module Global
140OpenCB::Exclude Scala3 specific scalacOption `REQUIRE:-source:3.7-migration` in Scala 2.12.20 module Global
141OpenCB::Filter out '-deprecation', matches setting pattern '^-?-deprecation'
142OpenCB::Filter out '-feature', matches setting pattern '^-?-feature'
143Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
144[info] compiling 57 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
145[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:187:27
146[warn] 187 | def fromArray[A: ClassTag](array: Array[A]): Chunk[A] =
147[warn] | ^
148[warn] | unused implicit parameter
149[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:190:28
150[warn] 190 | def fromIArray[A: ClassTag](array: IArray[A]): Chunk[A] =
151[warn] | ^
152[warn] | unused implicit parameter
153[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:35:45
154[warn] 35 | def addSuppressedException[T](error: F[T], e: Throwable): F[T] = error
155[warn] | ^
156[warn] | unused explicit parameter
157[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:40:41
158[warn] 40 | def addSuppressedError[T](error: F[T], e: E): F[T] = error
159[warn] | ^
160[warn] | unused explicit parameter
161[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/channels/SourceOps.scala:5:12
162[warn] 5 |import java.util
163[warn] | ^^^^
164[warn] | unused import
165[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
166[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
167[warn] | ^^^^
168[warn] | unused explicit parameter
169[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:5:10
170[warn] 5 |import ox.Ox
171[warn] | ^^
172[warn] | unused import
173[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:616:8
174[warn] 616 | tap(t => sleep(emitEveryMillis))
175[warn] | ^
176[warn] | unused explicit parameter
177[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:947:53
178[warn] 947 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
179[warn] | ^^^^
180[warn] | unused explicit parameter
181[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowReactiveOps.scala:11:10
182[warn] 11 |import ox.fork
183[warn] | ^^^^
184[warn] | unused import
185[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowTextOps.scala:149:60
186[warn] 149 | def processByteOrderMark(bytes: T, buffer: Chunk[Byte], output: FlowEmit[String]): (Chunk[Byte], State) =
187[warn] | ^^^^^^
188[warn] | unused explicit parameter
189[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
190[warn] 123 | private var successCalls = 0
191[warn] | ^^^^^^^^^^^^
192[warn] | private variable was mutated but not read
193[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
194[warn] 167 | private var successCalls = 0
195[warn] | ^^^^^^^^^^^^
196[warn] | private variable was mutated but not read
197[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:414:15
198[warn] 414 | case Nested(t) =>
199[warn] | ^
200[warn] |the type test for Nested cannot be checked at runtime because it's a local class
201[warn] |
202[warn] | longer explanation available when compiling with `-explain`
203[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
204[warn] 150 | case FromParent(t) =>
205[warn] | ^
206[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
207[warn] |
208[warn] | longer explanation available when compiling with `-explain`
209[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
210[warn] 154 | case ChildDone(v) =>
211[warn] | ^
212[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
213[warn] |
214[warn] | longer explanation available when compiling with `-explain`
215[info] [patched file /build/repo/core/src/main/scala/ox/channels/SourceOps.scala]
216[info] [patched file /build/repo/core/src/main/scala/ox/local.scala]
217[info] [patched file /build/repo/core/src/main/scala/ox/flow/FlowOps.scala]
218[info] [patched file /build/repo/core/src/main/scala/ox/flow/FlowReactiveOps.scala]
219[info] [patched file /build/repo/core/src/main/scala/ox/oxThreadFactory.scala]
220[warn] 16 warnings found
221[info] done compiling
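
The E198 warnings in this round come from `-Wunused:implicits` and `-Wunused:explicits`: evidence or value parameters that the body never uses are flagged. Minimal reproduction of the context-bound case (hedged sketch, not the project's code):

  import scala.reflect.ClassTag

  // Warns "unused implicit parameter": the ClassTag evidence introduced by the
  // [A: ClassTag] context bound is never consumed in the body.
  def fromArray[A: ClassTag](array: Array[A]): List[A] =
    array.toList
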
222[info] compiling 1 Scala source to /build/repo/flow-reactive-streams/target/scala-3.7.4/classes ...
223[info] done compiling
224[info] compiling 5 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
225[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:615:8
226[warn] 615 | tap(t => sleep(emitEveryMillis))
227[warn] | ^
228[warn] | unused explicit parameter
229[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:946:53
230[warn] 946 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
231[warn] | ^^^^
232[warn] | unused explicit parameter
233[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:413:15
234[warn] 413 | case Nested(t) =>
235[warn] | ^
236[warn] |the type test for Nested cannot be checked at runtime because it's a local class
237[warn] |
238[warn] | longer explanation available when compiling with `-explain`
239[warn] three warnings found
240[info] done compiling
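
The E092 warnings concern type tests against classes defined inside a method: such tests cannot be verified at runtime, so the match is unchecked. Minimal reproduction (hedged sketch):

  def demo(x: Any): String =
    case class Nested(t: Int) // local class: its type test cannot be checked at runtime
    x match
      case Nested(t) => s"nested $t" // warns: E092 Pattern Match Unchecked
      case _         => "other"
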
241[info] compiling 1 Scala source to /build/repo/core/target/scala-3.7.4/classes ...
242[warn] three warnings found
243[info] done compiling
244[info] compiling 25 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
245[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
246[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
247[warn] | ^^^^
248[warn] | unused explicit parameter
249[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
250[warn] 123 | private var successCalls = 0
251[warn] | ^^^^^^^^^^^^
252[warn] | private variable was mutated but not read
253[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
254[warn] 167 | private var successCalls = 0
255[warn] | ^^^^^^^^^^^^
256[warn] | private variable was mutated but not read
257[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
258[warn] 150 | case FromParent(t) =>
259[warn] | ^
260[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
261[warn] |
262[warn] | longer explanation available when compiling with `-explain`
263[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
264[warn] 154 | case ChildDone(v) =>
265[warn] | ^
266[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
267[warn] |
268[warn] | longer explanation available when compiling with `-explain`
269[warn] 8 warnings found
270[info] done compiling
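
The "private variable was mutated but not read" diagnostic (also E198, under `-Wunused:privates`) flags private state that is only ever written, never read. Minimal reproduction (hedged sketch):

  class Metrics:
    private var successCalls = 0 // warns: private variable was mutated but not read
    def onSuccess(): Unit = successCalls += 1
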
271Starting build for ProjectRef(file:/build/repo/,mdcLogback) (mdc-logback)... [1/6]
272Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
273[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.7.4/classes ...
274[info] done compiling
275[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.7.4/test-classes ...
276[info] done compiling
277Starting build for ProjectRef(file:/build/repo/,core) (core)... [2/6]
278Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
279[info] compiling 112 Scala sources to /build/repo/core/target/scala-3.7.4/test-classes ...
280[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
281[warn] 43 | def transformation(i: Int) =
282[warn] | ^
283[warn] | unused explicit parameter
284[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ForeachParTest.scala:38:23
285[warn] 38 | def transformation(i: Int) =
286[warn] | ^
287[warn] | unused explicit parameter
288[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
289[warn] 43 | def transformation(i: Int) =
290[warn] | ^
291[warn] | unused explicit parameter
292[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:9:27
293[warn] 9 |import scala.util.boundary.*
294[warn] | ^
295[warn] | unused import
296[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:13:29
297[warn] 13 |import java.util.concurrent.{Semaphore, TimeUnit}
298[warn] | ^^^^^^^^^
299[warn] | unused import
300[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:13:40
301[warn] 13 |import java.util.concurrent.{Semaphore, TimeUnit}
302[warn] | ^^^^^^^^
303[warn] | unused import
304[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ParTest.scala:80:21
305[warn] 80 | (1 to 5).map(i =>
306[warn] | ^
307[warn] | unused explicit parameter
308[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:126:41
309[warn] 126 | use(new TestResource, _.release()) { r =>
310[warn] | ^
311[warn] | unused explicit parameter
312[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:140:37
313[warn] 140 | useCloseable(new TestResource) { r =>
314[warn] | ^
315[warn] | unused explicit parameter
316[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:157:43
317[warn] 157 | use(new TestResource, _.release()) { r =>
318[warn] | ^
319[warn] | unused explicit parameter
320[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala:3:43
321[warn] 3 |import org.scalatest.concurrent.Eventually.*
322[warn] | ^
323[warn] | unused import
324[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:3:43
325[warn] 3 |import org.scalatest.concurrent.Eventually.*
326[warn] | ^
327[warn] | unused import
328[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:387:44
329[warn] 387 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
330[warn] | ^
331[warn] | unused implicit parameter
332[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsCollectTest.scala:5:10
333[warn] 5 |import ox.*
334[warn] | ^
335[warn] | unused import
336[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsConcatPrependTest.scala:5:10
337[warn] 5 |import ox.*
338[warn] | ^
339[warn] | unused import
340[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceByTest.scala:5:10
341[warn] 5 |import ox.*
342[warn] | ^
343[warn] | unused import
344[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceTest.scala:5:10
345[warn] 5 |import ox.*
346[warn] | ^
347[warn] | unused import
348[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDropTest.scala:5:10
349[warn] 5 |import ox.*
350[warn] | ^
351[warn] | unused import
352[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsEmptyTest.scala:5:10
353[warn] 5 |import ox.*
354[warn] | ^
355[warn] | unused import
356[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFilterTest.scala:5:10
357[warn] 5 |import ox.*
358[warn] | ^
359[warn] | unused import
360[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFlatMapTest.scala:5:10
361[warn] 5 |import ox.*
362[warn] | ^
363[warn] | unused import
364[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFlattenTest.scala:6:10
365[warn] 6 |import ox.*
366[warn] | ^
367[warn] | unused import
368[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFoldTest.scala:5:10
369[warn] 5 |import ox.*
370[warn] | ^
371[warn] | unused import
372[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsForeachTest.scala:5:10
373[warn] 5 |import ox.*
374[warn] | ^
375[warn] | unused import
376[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFutureTest.scala:5:10
377[warn] 5 |import ox.*
378[warn] | ^
379[warn] | unused import
380[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:16:27
381[warn] 16 | .groupBy(10, _ % 10)(v => f => f)
382[warn] | ^
383[warn] | unused explicit parameter
384[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:22:44
385[warn] 22 | Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
386[warn] | ^
387[warn] | unused explicit parameter
388[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:28:68
389[warn] 28 | for i <- 1 to 100000 do Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
390[warn] | ^
391[warn] | unused explicit parameter
392[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:109:26
393[warn] 109 | .groupBy(1, _ => 0)(v => _.tap(_ => sleep(10.millis)))
394[warn] | ^
395[warn] | unused explicit parameter
396[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:119:31
397[warn] 119 | .groupBy(10, _ % 10)(v => f => f.tap(i => if i == 13 then throw new RuntimeException("boom!")))
398[warn] | ^
399[warn] | unused explicit parameter
400[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:131:30
401[warn] 131 | .groupBy(1, _ => 0)(v => f => f.tap(_ => sleep(100.millis).tap(_ => throw new RuntimeException("boom!"))))
402[warn] | ^
403[warn] | unused explicit parameter
404[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:142:31
405[warn] 142 | .groupBy(10, _ % 10)(v => f => f)
406[warn] | ^
407[warn] | unused explicit parameter
408[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:153:29
409[warn] 153 | .groupBy(10, _ % 10)(v => f => f.take(1))
410[warn] | ^
411[warn] | unused explicit parameter
412[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsLastOptionTest.scala:6:10
413[warn] 6 |import ox.*
414[warn] | ^
415[warn] | unused import
416[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsLastTest.scala:5:10
417[warn] 5 |import ox.*
418[warn] | ^
419[warn] | unused import
420[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsMapTest.scala:5:10
421[warn] 5 |import ox.*
422[warn] | ^
423[warn] | unused import
424[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsMapUsingSinkTest.scala:5:10
425[warn] 5 |import ox.*
426[warn] | ^
427[warn] | unused import
428[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsReduceTest.scala:5:10
429[warn] 5 |import ox.*
430[warn] | ^
431[warn] | unused import
432[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSampleTest.scala:5:10
433[warn] 5 |import ox.*
434[warn] | ^
435[warn] | unused import
436[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsScanTest.scala:5:10
437[warn] 5 |import ox.*
438[warn] | ^
439[warn] | unused import
440[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitOnTest.scala:5:10
441[warn] 5 |import ox.*
442[warn] | ^
443[warn] | unused import
444[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitTest.scala:5:10
445[warn] 5 |import ox.*
446[warn] | ^
447[warn] | unused import
448[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsTakeWhileTest.scala:5:10
449[warn] 5 |import ox.*
450[warn] | ^
451[warn] | unused import
452[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsThrottleTest.scala:5:10
453[warn] 5 |import ox.*
454[warn] | ^
455[warn] | unused import
456[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsTimeoutTest.scala:6:10
457[warn] 6 |import ox.*
458[warn] | ^
459[warn] | unused import
460[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsUsingSink.scala:5:10
461[warn] 5 |import ox.*
462[warn] | ^
463[warn] | unused import
464[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsZipAllTest.scala:5:10
465[warn] 5 |import ox.*
466[warn] | ^
467[warn] | unused import
468[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:24:21
469[warn] 24 | def afterAttempt(attempt: Int, result: Either[Throwable, Int]): Unit =
470[warn] | ^^^^^^^
471[warn] | unused explicit parameter
472[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:50:21
473[warn] 50 | def afterAttempt(attempt: Int, result: Either[Throwable, Unit]): Unit =
474[warn] | ^^^^^^^
475[warn] | unused explicit parameter
476[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala]
477[info] [patched file /build/repo/core/src/test/scala/ox/OxAppTest.scala]
478[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsScanTest.scala]
479[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFlattenTest.scala]
480[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala]
481[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsThrottleTest.scala]
482[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsEmptyTest.scala]
483[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitTest.scala]
484[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsConcatPrependTest.scala]
485[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsLastTest.scala]
486[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsZipWithIndexTest.scala]
487[info] [patched file /build/repo/core/src/test/scala/ox/MapParTest.scala]
488[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFoldTest.scala]
489[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsReduceTest.scala]
490[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFutureTest.scala]
491[info] [patched file /build/repo/core/src/test/scala/ox/FilterParTest.scala]
492[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSampleTest.scala]
493[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsZipAllTest.scala]
494[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitOnTest.scala]
495[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsUsingSink.scala]
496[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFilterTest.scala]
497[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsCollectTest.scala]
498[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFlatMapTest.scala]
499[info] [patched file /build/repo/core/src/test/scala/ox/CollectParTest.scala]
500[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsTakeWhileTest.scala]
501[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceByTest.scala]
502[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsMapTest.scala]
503[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceTest.scala]
504[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDropTest.scala]
505[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsLastOptionTest.scala]
506[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsMapUsingSinkTest.scala]
507[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsTimeoutTest.scala]
508[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsForeachTest.scala]
509[warn] 49 warnings found
510[info] done compiling
511[info] compiling 33 Scala sources to /build/repo/core/target/scala-3.7.4/test-classes ...
512[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
513[warn] 43 | def transformation(i: Int) =
514[warn] | ^
515[warn] | unused explicit parameter
516[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
517[warn] 43 | def transformation(i: Int) =
518[warn] | ^
519[warn] | unused explicit parameter
520[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:386:44
521[warn] 386 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
522[warn] | ^
523[warn] | unused implicit parameter
524[warn] three warnings found
525[info] done compiling
526Starting build for ProjectRef(file:/build/repo/,cron) (cron)... [3/6]
527Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
528[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/classes ...
529[info] done compiling
530[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/test-classes ...
531[warn] -- [E198] Unused Symbol Warning: /build/repo/cron/src/test/scala/ox/scheduling/cron/CronScheduleTest.scala:7:33
532[warn] 7 |import scala.concurrent.duration.*
533[warn] | ^
534[warn] | unused import
535[info] [patched file /build/repo/cron/src/test/scala/ox/scheduling/cron/CronScheduleTest.scala]
536[warn] one warning found
537[info] done compiling
538[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/test-classes ...
539[info] done compiling
540Starting build for ProjectRef(file:/build/repo/,otelContext) (otel-context)... [4/6]
541Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
542[info] compiling 1 Scala source to /build/repo/otel-context/target/scala-3.7.4/classes ...
543[info] done compiling
544Starting build for ProjectRef(file:/build/repo/,kafka) (kafka)... [5/6]
545Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
546[info] compiling 9 Scala sources to /build/repo/kafka/target/scala-3.7.4/classes ...
547[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaConsumerWrapper.scala:45:14
548[warn] 45 | def close(wrapper: KafkaConsumerWrapper[K, V]): Unit = if closeWhenComplete then
549[warn] | ^^^^^^^
550[warn] | unused explicit parameter
551[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaFlow.scala:3:41
552[warn] 3 |import org.apache.kafka.clients.consumer.ConsumerRecord
553[warn] | ^^^^^^^^^^^^^^
554[warn] | unused import
555[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/KafkaFlow.scala]
556[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/KafkaStage.scala]
557[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/kafkaOffsetCommit.scala]
558[warn] two warnings found
559[info] done compiling
560[info] compiling 3 Scala sources to /build/repo/kafka/target/scala-3.7.4/classes ...
561[info] done compiling
562[info] compiling 6 Scala sources to /build/repo/kafka/target/scala-3.7.4/test-classes ...
563[info] [patched file /build/repo/kafka/src/test/scala/ox/kafka/KafkaTest.scala]
564[info] done compiling
565[info] compiling 1 Scala source to /build/repo/kafka/target/scala-3.7.4/test-classes ...
566[info] done compiling
567
568************************
569Build summary:
570[{
571 "module": "flow-reactive-streams",
572 "compile": {"status": "ok", "tookMs": 17277, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
573 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
574 "test-compile": {"status": "ok", "tookMs": 6887, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
575 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
576 "publish": {"status": "skipped", "tookMs": 0},
577 "metadata": {
578 "crossScalaVersions": ["2.12.20"]
579}
580},{
581 "module": "mdc-logback",
582 "compile": {"status": "ok", "tookMs": 487, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
583 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
584 "test-compile": {"status": "ok", "tookMs": 1380, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
585 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
586 "publish": {"status": "skipped", "tookMs": 0},
587 "metadata": {
588 "crossScalaVersions": ["2.12.20"]
589}
590},{
591 "module": "core",
592 "compile": {"status": "ok", "tookMs": 49, "warnings": 16, "errors": 0, "sourceVersion": "3.7-migration"},
593 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
594 "test-compile": {"status": "ok", "tookMs": 20671, "warnings": 49, "errors": 0, "sourceVersion": "3.7-migration"},
595 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
596 "publish": {"status": "skipped", "tookMs": 0},
597 "metadata": {
598 "crossScalaVersions": ["2.12.20"]
599}
600},{
601 "module": "cron",
602 "compile": {"status": "ok", "tookMs": 347, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
603 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
604 "test-compile": {"status": "ok", "tookMs": 1161, "warnings": 1, "errors": 0, "sourceVersion": "3.7-migration"},
605 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
606 "publish": {"status": "skipped", "tookMs": 0},
607 "metadata": {
608 "crossScalaVersions": ["2.12.20"]
609}
610},{
611 "module": "otel-context",
612 "compile": {"status": "ok", "tookMs": 196, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
613 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
614 "test-compile": {"status": "ok", "tookMs": 165, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
615 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
616 "publish": {"status": "skipped", "tookMs": 0},
617 "metadata": {
618 "crossScalaVersions": ["2.12.20"]
619}
620},{
621 "module": "kafka",
622 "compile": {"status": "ok", "tookMs": 806, "warnings": 2, "errors": 0, "sourceVersion": "3.7-migration"},
623 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
624 "test-compile": {"status": "ok", "tookMs": 1606, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
625 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
626 "publish": {"status": "skipped", "tookMs": 0},
627 "metadata": {
628 "crossScalaVersions": ["2.12.20"]
629}
630}]
631************************
632[success] Total time: 120 s (0:02:00.0), completed Dec 4, 2025, 2:26:05 PM
633Checking patch project/plugins.sbt...
634Checking patch build.sbt...
635Applied patch project/plugins.sbt cleanly.
636Applied patch build.sbt cleanly.
637Commit migration rewrites
638Switched to a new branch 'opencb/migrate-source-3.7'
639[opencb/migrate-source-3.7 11ec849] Apply Scala compiler rewrites using -source:3.7-migration using Scala 3.7.4
640 43 files changed, 24 insertions(+), 60 deletions(-)
641----
642Preparing build for 3.8.0-RC3
643Scala binary version found: 3.8
644Implicitly using source version 3.8
645Scala binary version found: 3.8
646Implicitly using source version 3.8
647Would try to apply common scalacOption (best-effort, sbt/mill only):
648Append: ,REQUIRE:-source:3.8
649Remove: ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
650----
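
Compared with the 3.7.4 migration pass, this pass appends only the required source level and drops `-rewrite`, since the rewrites were already committed; tests are also enabled for this run. In plain sbt terms (hedged sketch):

  Compile / scalacOptions += "-source:3.8"
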
651Starting build for 3.8.0-RC3
652Execute tests: true
653sbt project found:
654No prepare script found for project softwaremill/ox
655##################################
656Scala version: 3.8.0-RC3
657Targets: com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context
658Project projectConfig: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}
659##################################
660Using extra scalacOptions: ,REQUIRE:-source:3.8
661Filtering out scalacOptions: ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
662[sbt_options] declare -a sbt_options=()
663[process_args] java_version = '21'
664[copyRt] java9_rt = '/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21/rt.jar'
665# Executing command line:
666java
667-Dfile.encoding=UTF-8
668-Dcommunitybuild.scala=3.8.0-RC3
669-Dcommunitybuild.project.dependencies.add=
670-Xmx7G
671-Xms4G
672-Xss8M
673-Dsbt.script=/root/.sdkman/candidates/sbt/current/bin/sbt
674-Dscala.ext.dirs=/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21
675-jar
676/root/.sdkman/candidates/sbt/1.11.5/bin/sbt-launch.jar
677"setCrossScalaVersions 3.8.0-RC3"
678"++3.8.0-RC3 -v"
679"mapScalacOptions ",REQUIRE:-source:3.8,-Wconf:msg=can be rewritten automatically under:s" ",-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e""
680"set every credentials := Nil"
681"excludeLibraryDependency com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}"
682"removeScalacOptionsStartingWith -P:wartremover"
683
684moduleMappings
685"runBuild 3.8.0-RC3 """{"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}""" com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context"
686
687[info] welcome to sbt 1.11.7 (Eclipse Adoptium Java 21)
688[info] loading settings for project repo-build from akka.sbt, plugins.sbt...
689[info] loading project definition from /build/repo/project
690[info] loading settings for project rootProject from build.sbt...
691[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
692[info] set current project to ox (in build file:/build/repo/)
693Execute setCrossScalaVersions: 3.8.0-RC3
694OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC3 in core/crossScalaVersions
695OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC3) in core/crossScalaVersions
696OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC3 in mdcLogback/crossScalaVersions
697OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC3) in mdcLogback/crossScalaVersions
698OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC3 in flowReactiveStreams/crossScalaVersions
699OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC3) in flowReactiveStreams/crossScalaVersions
700OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC3 in cron/crossScalaVersions
701OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC3) in cron/crossScalaVersions
702OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC3 in documentation/crossScalaVersions
703OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC3) in documentation/crossScalaVersions
704OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC3 in rootProject/crossScalaVersions
705OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC3) in rootProject/crossScalaVersions
706OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC3 in kafka/crossScalaVersions
707OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC3) in kafka/crossScalaVersions
708OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC3 in otelContext/crossScalaVersions
709OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC3) in otelContext/crossScalaVersions
710[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
711[info] set current project to ox (in build file:/build/repo/)
712[info] Setting Scala version to 3.8.0-RC3 on 8 projects.
713[info] Switching Scala version on:
714[info] flowReactiveStreams (3.8.0-RC3)
715[info] documentation (3.8.0-RC3)
716[info] cron (3.8.0-RC3)
717[info] * rootProject (3.8.0-RC3)
718[info] mdcLogback (3.8.0-RC3)
719[info] kafka (3.8.0-RC3)
720[info] core (3.8.0-RC3)
721[info] otelContext (3.8.0-RC3)
722[info] Excluding projects:
723[info] Reapplying settings...
724[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
725[info] set current project to ox (in build file:/build/repo/)
726Execute mapScalacOptions: ,REQUIRE:-source:3.8,-Wconf:msg=can be rewritten automatically under:s ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
727[info] Reapplying settings...
728[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
729[info] set current project to ox (in build file:/build/repo/)
730[info] Defining Global / credentials, core / credentials and 6 others.
731[info] The new values will be used by Compile / scalafmtOnly, Global / pgpSelectPassphrase and 63 others.
732[info] Run `last` for details.
733[info] Reapplying settings...
734[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
735[info] set current project to ox (in build file:/build/repo/)
736Execute excludeLibraryDependency: com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}
737[info] Reapplying settings...
738OpenCB::Failed to reapply settings in excludeLibraryDependency: Reference to undefined setting:
739
740 Global / allExcludeDependencies from Global / allExcludeDependencies (CommunityBuildPlugin.scala:331)
741 Did you mean flowReactiveStreams / allExcludeDependencies ?
742 , retry without global scopes
743[info] Reapplying settings...
744[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
745[info] set current project to ox (in build file:/build/repo/)
746Execute removeScalacOptionsStartingWith: -P:wartremover
747[info] Reapplying settings...
748[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
749[info] set current project to ox (in build file:/build/repo/)
750[success] Total time: 0 s, completed Dec 4, 2025, 2:26:16 PM
751Build config: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}
752Parsed config: Success(ProjectBuildConfig(ProjectsConfig(List(),Map()),Full,List()))
753Starting build...
754Projects: Set(flowReactiveStreams, cron, mdcLogback, kafka, core, otelContext)
755Starting build for ProjectRef(file:/build/repo/,flowReactiveStreams) (flow-reactive-streams)... [0/6]
756OpenCB::Exclude Scala3 specific scalacOption `REQUIRE:-source:3.8` in Scala 2.12.20 module Global
757OpenCB::Filter out '-deprecation', matches setting pattern '^-?-deprecation'
758OpenCB::Filter out '-feature', matches setting pattern '^-?-feature'
759Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
760[info] compiling 57 Scala sources to /build/repo/core/target/scala-3.8.0-RC3/classes ...
761[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:187:27
762[warn] 187 | def fromArray[A: ClassTag](array: Array[A]): Chunk[A] =
763[warn] | ^
764[warn] | unused implicit parameter
765[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:190:28
766[warn] 190 | def fromIArray[A: ClassTag](array: IArray[A]): Chunk[A] =
767[warn] | ^
768[warn] | unused implicit parameter
769[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:35:45
770[warn] 35 | def addSuppressedException[T](error: F[T], e: Throwable): F[T] = error
771[warn] | ^
772[warn] | unused explicit parameter
773[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:40:41
774[warn] 40 | def addSuppressedError[T](error: F[T], e: E): F[T] = error
775[warn] | ^
776[warn] | unused explicit parameter
777[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
778[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
779[warn] | ^^^^
780[warn] | unused explicit parameter
781[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:615:8
782[warn] 615 | tap(t => sleep(emitEveryMillis))
783[warn] | ^
784[warn] | unused explicit parameter
785[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:946:53
786[warn] 946 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
787[warn] | ^^^^
788[warn] | unused explicit parameter
789[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowTextOps.scala:149:60
790[warn] 149 | def processByteOrderMark(bytes: T, buffer: Chunk[Byte], output: FlowEmit[String]): (Chunk[Byte], State) =
791[warn] | ^^^^^^
792[warn] | unused explicit parameter
793[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
794[warn] 123 | private var successCalls = 0
795[warn] | ^^^^^^^^^^^^
796[warn] | private variable was mutated but not read
797[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
798[warn] 167 | private var successCalls = 0
799[warn] | ^^^^^^^^^^^^
800[warn] | private variable was mutated but not read
801[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:413:15
802[warn] 413 | case Nested(t) =>
803[warn] | ^
804[warn] |the type test for Nested cannot be checked at runtime because it's a local class
805[warn] |
806[warn] | longer explanation available when compiling with `-explain`
807[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
808[warn] 150 | case FromParent(t) =>
809[warn] | ^
810[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
811[warn] |
812[warn] | longer explanation available when compiling with `-explain`
813[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
814[warn] 154 | case ChildDone(v) =>
815[warn] | ^
816[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
817[warn] |
818[warn] | longer explanation available when compiling with `-explain`
819[warn] 13 warnings found
820[info] done compiling
821[info] compiling 1 Scala source to /build/repo/flow-reactive-streams/target/scala-3.8.0-RC3/classes ...
822[info] done compiling
823Starting build for ProjectRef(file:/build/repo/,mdcLogback) (mdc-logback)... [1/6]
824Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
825[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.8.0-RC3/classes ...
826[info] done compiling
827[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.8.0-RC3/test-classes ...
828[info] done compiling
82914:26:48.132 [pool-28-thread-2] INFO ox.logback.InheritableMDC$ -- Scoped-value based MDC initialized
830[info] InheritableMDCTest:
831[info] - should make MDC values available in forks
832Starting build for ProjectRef(file:/build/repo/,core) (core)... [2/6]
833Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
834[info] compiling 112 Scala sources to /build/repo/core/target/scala-3.8.0-RC3/test-classes ...
835[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
836[warn] 43 | def transformation(i: Int) =
837[warn] | ^
838[warn] | unused explicit parameter
839[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ForeachParTest.scala:38:23
840[warn] 38 | def transformation(i: Int) =
841[warn] | ^
842[warn] | unused explicit parameter
843[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
844[warn] 43 | def transformation(i: Int) =
845[warn] | ^
846[warn] | unused explicit parameter
847[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ParTest.scala:80:21
848[warn] 80 | (1 to 5).map(i =>
849[warn] | ^
850[warn] | unused explicit parameter
851[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:126:41
852[warn] 126 | use(new TestResource, _.release()) { r =>
853[warn] | ^
854[warn] | unused explicit parameter
855[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:140:37
856[warn] 140 | useCloseable(new TestResource) { r =>
857[warn] | ^
858[warn] | unused explicit parameter
859[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:157:43
860[warn] 157 | use(new TestResource, _.release()) { r =>
861[warn] | ^
862[warn] | unused explicit parameter
863[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala:5:19
864[warn] 5 |import ox.{timeout as _, *}
865[warn] | ^^^^^^^^^^^^
866[warn] | unused import
867[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:5:19
868[warn] 5 |import ox.{timeout as _, *}
869[warn] | ^^^^^^^^^^^^
870[warn] | unused import
871[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:386:44
872[warn] 386 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
873[warn] | ^
874[warn] | unused implicit parameter
875[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:16:27
876[warn] 16 | .groupBy(10, _ % 10)(v => f => f)
877[warn] | ^
878[warn] | unused explicit parameter
879[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:22:44
880[warn] 22 | Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
881[warn] | ^
882[warn] | unused explicit parameter
883[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:28:68
884[warn] 28 | for i <- 1 to 100000 do Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
885[warn] | ^
886[warn] | unused explicit parameter
887[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:109:26
888[warn] 109 | .groupBy(1, _ => 0)(v => _.tap(_ => sleep(10.millis)))
889[warn] | ^
890[warn] | unused explicit parameter
891[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:119:31
892[warn] 119 | .groupBy(10, _ % 10)(v => f => f.tap(i => if i == 13 then throw new RuntimeException("boom!")))
893[warn] | ^
894[warn] | unused explicit parameter
895[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:131:30
896[warn] 131 | .groupBy(1, _ => 0)(v => f => f.tap(_ => sleep(100.millis).tap(_ => throw new RuntimeException("boom!"))))
897[warn] | ^
898[warn] | unused explicit parameter
899[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:142:31
900[warn] 142 | .groupBy(10, _ % 10)(v => f => f)
901[warn] | ^
902[warn] | unused explicit parameter
903[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:153:29
904[warn] 153 | .groupBy(10, _ % 10)(v => f => f.take(1))
905[warn] | ^
906[warn] | unused explicit parameter
907[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:24:21
908[warn] 24 | def afterAttempt(attempt: Int, result: Either[Throwable, Int]): Unit =
909[warn] | ^^^^^^^
910[warn] | unused explicit parameter
911[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:50:21
912[warn] 50 | def afterAttempt(attempt: Int, result: Either[Throwable, Unit]): Unit =
913[warn] | ^^^^^^^
914[warn] | unused explicit parameter
915[warn] 20 warnings found
916[info] done compiling
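The twenty [E198] warnings above all stem from unused-symbol checks: parameters, implicit parameters, and imports that are declared but never referenced. A minimal plain-Scala sketch (no ox APIs involved) of how such a warning arises and how it is typically silenced:

    import scala.annotation.unused

    // Would trigger "[E198] unused explicit parameter" when compiled
    // with unused-symbol warnings enabled:
    def transformation(i: Int): String = "constant"

    // Standard fix: annotate the deliberately ignored parameter.
    def transformationFixed(@unused i: Int): String = "constant"

In test code such parameters are often kept for signature compatibility, which is why they surface as warnings rather than being removed.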
917[info] AfterAttemptTest:
918[info] RetryPolicy afterAttempt callback
919[info] - should retry a succeeding function with afterAttempt callback
920[info] - should retry a failing function with afterAttempt callback
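AfterAttemptTest exercises a callback that receives the attempt number and the attempt's result, matching the `(attempt: Int, result: Either[Throwable, T])` shape visible in the warnings above. As an illustration of the pattern only, not ox's actual retry API, a hand-rolled loop invoking such a callback might look like:

    import scala.annotation.tailrec
    import scala.util.{Failure, Success, Try}

    // Illustrative retry loop with an afterAttempt callback; the names and
    // signature are hypothetical and only mirror the shape seen in the log.
    def retryWithCallback[T](maxAttempts: Int)(operation: => T)(
        afterAttempt: (Int, Either[Throwable, T]) => Unit
    ): T =
      @tailrec def loop(attempt: Int): T =
        Try(operation) match
          case Success(value) =>
            afterAttempt(attempt, Right(value))
            value
          case Failure(e) =>
            afterAttempt(attempt, Left(e))
            if attempt < maxAttempts then loop(attempt + 1) else throw e
      loop(1)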
921[info] SourceOpsTest:
922[info] - should pipe one source to another
923[info] - should pipe one source to another (with done propagation)
924[info] - should tap over a source
925[info] FlowOpsReduceTest:
926[info] reduce
927[info] - should throw NoSuchElementException for reduce over the empty source
928[info] - should rethrow the exception thrown in `f`
929[info] - should return first element from reduce over the single element source
930[info] - should run reduce on a non-empty source
931[info] ExceptionTest:
932[info] unsupervised
933[2025-12-04T13:27:11.480637649Z] [24] CustomException
934[info] - should throw the exception thrown by a joined fork
935[info] supervised
936[2025-12-04T13:27:11.487168505Z] [24] CustomException
937[info] - should throw the exception thrown in the scope
938[2025-12-04T13:27:11.489465620Z] [24] CustomException(suppressed=ExecutionException)
939[info] - should retain the original exception for context, as suppressed
940[2025-12-04T13:27:11.491870983Z] [24] CustomException
941[info] - should throw the exception thrown by a failing fork
942[2025-12-04T13:27:11.597239355Z] [24] CustomException(suppressed=ExecutionException,InterruptedException,InterruptedException)
943[info] - should interrupt other forks when there's a failure, add suppressed interrupted exceptions
944[2025-12-04T13:27:11.701022289Z] [24] CustomException(suppressed=ExecutionException,CustomException2)
945[info] - should interrupt other forks when there's a failure, add suppressed custom exceptions
946[2025-12-04T13:27:11.704173538Z] [24] CustomException(suppressed=ExecutionException,InterruptedException)
947[info] - should not add the original exception as suppressed
948[2025-12-04T13:27:11.710616119Z] [24] CustomException(suppressed=ExecutionException,CustomException3)
949[info] - should add an exception as suppressed, even if it wraps the original exception
950[info] joinEither
951[info] - should catch the exception with which a fork ends
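ExceptionTest covers ox's supervision semantics: when a fork fails, the scope interrupts sibling forks and rethrows the original exception, recording the interruptions as suppressed (hence the `CustomException(suppressed=...)` lines above). A minimal sketch, assuming ox's documented `supervised`/`fork` API; the `@main` name is invented:

    import ox.{fork, sleep, supervised}
    import scala.concurrent.duration.*

    @main def exceptionDemo(): Unit =
      try
        supervised {
          fork { sleep(1.second); println("never printed") } // gets interrupted
          fork { throw new RuntimeException("boom") }        // fails the scope
          sleep(2.seconds)
        }
      catch
        case e: RuntimeException =>
          // the sibling's interruption typically ends up as a suppressed exception
          println(s"${e.getMessage}, suppressed=${e.getSuppressed.length}")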
952[info] ScheduleFallingBackRetryTest:
953[info] retry with combination of schedules
954[info] - should retry 3 times immediately and then 2 times with delay
955[info] - should retry forever
956[info] DelayedRetryTest:
957[info] Delayed retry
958[info] - should retry a function
959[info] - should retry a failing function forever
960[info] - should retry an Either
961[info] adaptive retry with delayed config
962[info] - should retry a failing function forever or until adaptive retry blocks it
963[info] CircuitBreakerTest:
964[info] Circuit Breaker run operations
965[info] - should run operation when metrics are not exceeded
966[info] - should drop operation after exceeding failure threshold
967[info] - should drop operation after exceeding slow call threshold
968[info] Circuit Breaker scheduled state changes
969[info] - should switch to halfOpen after configured time
970[info] - should switch back to open after configured timeout in half open state
971[info] - should correctly transition through states when there are concurrently running operations
972[info] - should correctly calculate metrics when results come in after state change
973[info] FlowOpsTakeWhileTest:
974[info] takeWhile
975[info] - should not take from the empty flow
976[info] - should take as long as predicate is satisfied
977[info] - should take the failed element if includeFirstFailing = true
978[info] - should work if all elements match the predicate
979[info] - should fail the source with the same exception as the initial source
980[info] - should not take if predicate fails for first or more elements
981[info] FlowOpsInterleaveAllTest:
982[info] interleaveAll
983[info] - should interleave no sources
984[info] - should interleave a single flow
985[info] - should interleave multiple flows
986[info] - should interleave multiple flows using custom segment size
987[info] - should interleave multiple flows using custom segment size and complete eagerly
988[info] AppErrorTest:
989[info] supervisedError
990[info] - should return the app error from the main body
991[info] - should return success from the main body
992[info] - should return the app error returned by a failing fork
993[info] - should return success from the main body if a fork is successful
994[info] - should interrupt other forks if one fails
995[info] ChunkTest:
996[info] Chunk
997[info] - should create empty chunks
998[info] - should create chunks from arrays
999[info] - should create chunks from IArrays
1000[info] - should create chunks from elements
1001[info] - should create empty chunks from empty arrays
1002[info] - should support random access
1003[info] - should throw IndexOutOfBoundsException for invalid indices
1004[info] - should support iteration
1005[info] - should support foreach operations
1006[info] - should concatenate two non-empty chunks efficiently
1007[info] - should handle concatenation with empty chunks
1008[info] - should support chained concatenation
1009[info] - should concatenate chunks of different types
1010[info] - should concatenate non-empty chunk with non-chunk collections
1011[info] - should concatenate empty chunk with non-chunk collections
1012[info] - should handle concatenation with empty collections
1013[info] - should support drop operations
1014[info] - should support take operations
1015[info] - should handle drop/take on concatenated chunks
1016[info] - should support map operations
1017[info] - should support filter operations
1018[info] - should support collect operations
1019[info] - should convert to arrays correctly
1020[info] - should convert concatenated chunks to arrays correctly
1021[info] - should convert byte chunks to strings
1022[info] - should convert concatenated byte chunks to strings
1023[info] - should provide access to backing arrays
1024[info] - should allow efficient processing via backing arrays
1025[info] - should handle operations on empty chunks
1026[info] - should maintain consistency between single and multi-array chunks
1027[info] - should handle large chunks efficiently
1028[info] - should support indexWhere on single chunks
1029[info] - should support indexWhere on concatenated chunks
1030[info] - should handle indexWhere on empty chunks
1031[info] - should handle indexWhere edge cases with concatenated chunks
1032[info] - should support contains and exists operations
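The ChunkTest names sketch the surface of ox's chunk type: construction from arrays, concatenation across multiple backing arrays, and the usual take/drop/map operations. A small usage sketch; the `ox.Chunk` import path and the constructor name are assumptions based on the test names, while `asStringUtf8` appears verbatim in the warnings above:

    import ox.Chunk

    @main def chunkDemo(): Unit =
      // build chunks from arrays and concatenate them
      val hello = Chunk.fromArray("hello ".getBytes)
      val world = Chunk.fromArray("world".getBytes)
      val joined = hello ++ world
      println(joined.asStringUtf8)         // hello world
      println(joined.take(5).asStringUtf8) // hello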
1033[info] FlowOpsFoldTest:
1034[info] fold
1035[info] - should throw an exception for a failed flow
1036[info] - should rethrow the exception thrown in `f`
1037[info] - should return `zero` value from fold on the empty source
1038[info] - should return the fold result on a non-empty source
1039[info] FlowOpsFilterTest:
1040[info] filter
1041[info] - should not filter anything from the empty flow
1042[info] - should filter out everything if no element meets 'f'
1043[info] - should not filter anything if all the elements meet 'f'
1044[info] - should filter out elements that don't meet 'f'
1045[info] FlowOpsMapUsingSinkTest:
1046[info] mapUsingSink
1047[info] - should map over a source, using emit
1048[info] - should propagate errors
1049[info] FlowOpsCollectTest:
1050[info] collect
1051[info] - should collect over a source
1052[info] FlowOpsGroupedTest:
1053[info] grouped
1054[info] - should emit grouped elements
1055[info] - should emit grouped elements and include remaining values when flow closes
1056[info] - should return failed flow when the original flow is failed
1057[info] groupedWeighted
1058[info] - should emit grouped elements with custom cost function
1059[info] - should return failed flow when cost function throws exception
1060[info] - should return failed source when the original source is failed
1061[info] groupedWithin
1062[info] - should group first batch of elements due to limit and second batch due to timeout
1063[info] - should group first batch of elements due to timeout and second batch due to limit
1064[info] - should wake up on new element and send it immediately after first batch is sent and channel goes to time-out mode
1065[info] - should send the group only once when the channel is closed
1066[info] - should return failed source when the original source is failed
1067[info] groupedWeightedWithin
1068[info] - should group elements on timeout in the first batch and consider max weight in the remaining batches
1069[info] - should return failed source when cost function throws exception
1070[info] - should return failed source when the original source is failed
1071[info] MapParTest:
1072[info] mapPar
1073[info] - should output the same type as input
1074[info] - should run computations in parallel
1075[info] - should run no more computations than the limit
1076[2025-12-04T13:27:32.660362474Z] [386] exception
1077[2025-12-04T13:27:32.663821899Z] [24] catch
1078[2025-12-04T13:27:32.964084004Z] [24] all done
1079[info] - should interrupt other computations if one fails
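MapParTest targets ox's parallel collection transform: up to `parallelism` computations run at once, results keep the input order, and a failure interrupts the in-flight siblings (the exception/catch/all done lines above). A minimal sketch, assuming a `mapPar` extension on collections importable as `ox.mapPar`:

    import ox.mapPar

    @main def mapParDemo(): Unit =
      // at most 3 squarings run concurrently; output order matches input order
      val squares = (1 to 10).mapPar(3)(i => i * i)
      println(squares)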
1080[info] RateLimiterInterfaceTest:
1081[info] RateLimiter interface
1082[info] - should drop or block operation depending on method used for fixed rate algorithm
1083[info] - should drop or block operation depending on method used for sliding window algorithm
1084[info] - should drop or block operation depending on method used for bucket algorithm
1085[info] - should drop or block operation concurrently
1086[info] UtilTest:
1087[info] discard
1088[2025-12-04T13:27:38.991916165Z] [24] in f
1089[info] - should do nothing
1090[2025-12-04T13:27:38.992960269Z] [24] in callback: boom!
1091[2025-12-04T13:27:38.993036713Z] [24] in catch: boom!
1092[info] tapException
1093[info] - should run the callback when an exception is thrown
1094[2025-12-04T13:27:38.993635824Z] [24] 42
1095[2025-12-04T13:27:38.993724150Z] [24] after
1096[info] - should not run the callback when no exception is thrown
1097[2025-12-04T13:27:38.994775047Z] [24] in catch: boom! 1
1098[2025-12-04T13:27:38.996385502Z] [24] Adding
1099[2025-12-04T13:27:38.996482293Z] [24] Got: 3
1100[info] - should suppress any additional exceptions
1101[info] pipe
1102[info] - should work
1103[info] tap
1104some label: 10
1105[info] - should work
1106[info] debug as extension
1107[info] - should work
1108[info] debug as top-level method
1109x.+(1) = 11
1110[info] - should work
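UtilTest covers ox's small direct-style helpers: `pipe` threads a value through a function, `tap` runs a side effect and returns the value unchanged (the `some label: 10` line above), and `discard` makes ignoring a result explicit. A brief sketch, assuming these extensions are importable from the `ox` package:

    import ox.{discard, pipe, tap}

    @main def utilDemo(): Unit =
      val n = 10
        .tap(v => println(s"some label: $v")) // side effect, value unchanged
        .pipe(_ + 1)                          // transform: 11
      println(n)                              // 11
      List(1, 2, 3).discard                   // explicitly drop an unused value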
1111[info] FlowOpsLastTest:
1112[info] last
1113[info] - should throw NoSuchElementException for the empty source
1114[info] - should throw ChannelClosedException.Error with exception and message that was thrown during retrieval
1115[info] - should return last element for the non-empty source
1116[info] FlowOpsFailedTest:
1117[info] failed
1118[info] - should fail on receive
1119[info] FlowOpsFlattenTest:
1120[info] flatten
1121[info] - should flatten nested flows
1122[info] WeightedHeapTest:
1123[info] WeightedHeap
1124[info] - should allow inserting elements with weights
1125[info] - should allow extracting the minimum element
1126[info] - should return None when extracting from an empty heap
1127[info] - should return the correct size after operations
1128[info] - should handle empty heaps correctly
1129[info] - should update the weight of an existing element and adjust its position
1130[info] - should throw an exception when updating the weight of a non-existent element
1131[info] - should handle multiple insertions and updates correctly
1132[info] - should handle duplicate insertions by updating the existing element's weight
1133[info] - should handle increasing the weight of an existing element
1134[info] - should maintain heap property after multiple weight increases
1135[info] - should work correctly when increasing the weight of the current minimum element
1136[info] - should handle increasing weights in a large heap
1137[info] - should maintain the heap property after multiple operations
1138[info] - should work with large numbers of elements
1139[info] - should maintain heap property with random insertions and extractions
1140[info] - should maintain heap property with random weight updates
1141[info] FlowOpsMapConcatTest:
1142[info] mapConcat
1143[info] - should unfold iterables
1144[info] - should transform elements
1145[info] - should handle empty lists
1146[info] - should propagate errors in the mapping function
1147[info] FlowOpsPipeToTest:
1148[info] - should pipe one source to another
1149[info] - should pipe one source to another (with done propagation)
1150[info] FlowOpsRecoverTest:
1151[info] Flow.recover
1152[info] - should pass through elements when upstream flow succeeds
1153[info] - should emit recovery value when upstream flow fails with handled exception
1154[info] - should not emit recovery value when downstream flow fails with handled exception
1155[info] - should propagate unhandled exceptions
1156[info] - should handle multiple exception types
1157[info] - should work with different recovery value type
1158[info] - should handle exception thrown during flow processing
1159[info] - should work with empty flow
1160[info] - should propagate exception when partial function throws
1161[info] FlowOpsMapStatefulTest:
1162[info] mapStateful
1163[info] - should zip with index
1164[info] - should calculate a running total
1165[info] - should be able to emit different values than incoming ones
1166[info] - should propagate errors in the mapping function
1167[info] - should propagate errors in the completion callback
1168[info] FlowOpsMapTest:
1169[info] map
1170[info] - should map over a source
1171[info] - should map over a source using for-syntax
1172[info] ForkTest:
1173[info] fork
1174[2025-12-04T13:27:39.230946505Z] [24] main mid
1175[2025-12-04T13:27:39.731259601Z] [435] f1 complete
1176[2025-12-04T13:27:40.231298496Z] [436] f2 complete
1177[2025-12-04T13:27:40.231559635Z] [24] result = 11
1178[info] - should run two forks concurrently
1179[2025-12-04T13:27:40.234441081Z] [438] f2 complete
1180[2025-12-04T13:27:40.234592294Z] [437] f1 complete
1181[2025-12-04T13:27:40.234755457Z] [24] result = 11
1182[info] - should allow nested forks
1183[2025-12-04T13:27:40.236288684Z] [24] main mid
1184[2025-12-04T13:27:40.736813688Z] [439] f1 complete
1185[2025-12-04T13:27:40.737053921Z] [24] result = 5
1186[2025-12-04T13:27:40.737237922Z] [440] f2 interrupted
1187[info] - should interrupt child forks when parents complete
1188[2025-12-04T13:27:40.740616026Z] [444] in fork
1189[info] - should allow starting forks within a forkCancellable body, using the outer scope
1190[2025-12-04T13:27:40.842449471Z] [447] in fork
1191[info] - should allow starting forks in outer scope, from an inner scope
1192[2025-12-04T13:27:40.844438748Z] [449] IllegalStateException
1193[info] - should not allow starting forks from a thread not created by the scope
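ForkTest demonstrates the core structured-concurrency building blocks: forks started in a `supervised` scope run concurrently, `join()` awaits their results, and child forks are interrupted when their parent completes. A minimal sketch matching the `result = 11` lines above, assuming ox's documented API (the `@main` name is invented):

    import ox.{fork, sleep, supervised}
    import scala.concurrent.duration.*

    @main def forkDemo(): Unit =
      val result = supervised {
        val f1 = fork { sleep(500.millis); 5 }
        val f2 = fork { sleep(1.second); 6 }
        f1.join() + f2.join() // waits ~1s in total: the forks run concurrently
      }
      println(s"result = $result") // result = 11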
1194[info] FlowOpsIntersperseTest:
1195[info] Flow.intersperse
1196[info] - should intersperse with inject only over an empty source
1197[info] - should intersperse with inject only over a source with one element
1198[info] - should intersperse with inject only over a source with multiple elements
1199[info] - should intersperse with start, inject and end over an empty source
1200[info] - should intersperse with start, inject and end over a source with one element
1201[info] - should intersperse with start, inject and end over a source with multiple elements
1202[info] FlowOpsScanTest:
1203[info] scan
1204[info] - should scan the empty flow
1205[info] - should scan a flow of summed Int
1206[info] - should scan a flow of multiplied Int
1207[info] - should scan a flow of concatenated String
1208[info] FlowOpsUsingSinkTest:
1209[info] usingSink
1210[info] - should send the passed elements
1211[info] FlowOpsTakeTest:
1212[info] take
1213[info] - should take from a simple flow
1214[info] - should take from an async flow
1215[info] - should take all if the flow ends sooner than the desired number of elements
1216[info] EitherTest:
1217[info] either
1218[info] - should work correctly when invoked on eithers
1219[info] - should work correctly when invoked on options
1220[info] - should work correctly when invoked on fork
1221[info] - should report a proper compilation error when used outside of either:
1222[info] - should report a proper compilation error when wrong error type is used for ok() (explicit type params)
1223[info] - should report a proper compilation error when wrong successful type is used (explicit type params)
1224[info] - should report a proper compilation error when wrong type annotation is used for ok() (error)
1225[info] - should report a proper compilation error when wrong type annotation is used (success)
1226[info] - should report a proper compilation error when wrong error type is used for fail() (explicit type params)
1227[info] - should report a proper compilation error when wrong type annotation is used for fail() (error)
1228[info] - should catch non fatal exceptions
1229[info] - should not catch fatal exceptions
1230[info] - should provide an either scope when catching non fatal exceptions
1231[info] - should report a proper compilation error when wrong error type is used for ok() in catchingNonFatal block
1232[info] - should work when combined with mapPar
1233[info] - should not allow nesting of eithers
1234[info] orThrow
1235[info] - should unwrap the value for a Right-value
1236[info] - should throw exceptions for a Left-value
1237[info] catching
1238[info] - should catch given exceptions only
1239[info] - should catch parent exceptions
1240[info] - should not catch non-given exceptions
1241[info] - should not catch fatal exceptions
1242[info] - should return successful results as Right-values
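EitherTest exercises ox's boundary-based `either` blocks: inside the block, `.ok()` unwraps a `Right` (or short-circuits the whole block on a `Left`), `.fail()` aborts with an error, and misuse is rejected at compile time, as the "proper compilation error" tests check. A sketch, assuming the `ok`/`fail` extensions are imported from `ox.either`:

    import ox.either
    import ox.either.{fail, ok}

    @main def eitherDemo(): Unit =
      val result: Either[String, Int] = either:
        val a = Right(2).ok()                  // unwraps to 2
        val b = Right(3).ok()                  // unwraps to 3
        if a + b > 100 then "too large".fail() // would short-circuit to a Left
        a + b
      println(result)                          // Right(5)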
1243[info] FlowIOOpsTest:
1244[info] asInputStream
1245[info] - should return an empty InputStream for an empty source
1246[info] - should return an InputStream for a simple source
1247[info] - should correctly track available bytes
1248[info] - should support bulk read operations with read(byte[])
1249[info] - should handle bulk read operations across multiple chunks
1250[info] - should handle bulk read with concatenated chunks (multiple backing arrays)
1251[info] - should handle read(byte[], offset, length) with various parameters
1252[info] - should handle edge cases for read(byte[], offset, length)
1253[info] - should throw appropriate exceptions for invalid read parameters
1254[info] - should maintain consistency between single-byte and bulk reads
1255[info] - should handle chunks with empty backing arrays
1256[info] - should handle flow with only empty chunks
1257[info] - should handle mixed empty and non-empty chunks in flow
1258[info] toOutputStream
1259[info] - should write a single chunk with bytes to an OutputStream
1260[info] - should write multiple chunks with bytes to an OutputStream
1261[info] - should write concatenated chunks to an OutputStream
1262[info] - should handle an empty Source
1263[info] - should close the OutputStream on write error
1264[info] - should close the OutputStream on error
1265[info] toFile
1266[info] - should open existing file and write a single chunk with bytes
1267[info] - should open existing file and write multiple chunks with bytes
1268[info] - should create file and write multiple chunks with bytes
1269[info] - should write concatenated chunks to a file
1270[info] - should use an existing file and overwrite it with a single chunk of bytes
1271[info] - should handle an empty source
1272[info] - should throw an exception on failing Source
1273[info] - should throw an exception if path is a directory
1274[info] - should throw an exception if file cannot be opened
1275[info] FlowOpsConcatPrependTest:
1276[info] concat
1277[info] - should concat other source
1278[info] prepend
1279[info] - should prepend other source
1280[info] FlowTextOpsTest:
1281[info] linesUtf8
1282[info] - should split a single chunk of bytes into lines
1283[info] - should split a single chunk of bytes into lines (multiple newlines)
1284[info] - should split a single chunk of bytes into lines (beginning with newline)
1285[info] - should split a single chunk of bytes into lines (ending with newline)
1286[info] - should split a single chunk of bytes into lines (empty array)
1287[info] - should split multiple chunks of bytes into lines
1288[info] - should split multiple chunks of bytes into lines (multiple newlines)
1289[info] - should split multiple chunks of bytes into lines (multiple empty chunks)
1290[info] lines(charset)
1291zażółć
1292gęślą
1293jaźń
1294[info] - should decode lines with specified charset
1295[info] - should decode lines correctly across chunk boundaries
1296[info] decodeStringUtf8
1297[info] - should decode a simple string
1298[info] - should decode a chunked string with UTF-8 multi-byte characters
1299[info] - should handle an empty Source
1300[info] - should handle partial BOM
1301[info] - should handle a string shorter than BOM
1302[info] - should handle empty chunks
1303[info] encodeUtf8
1304[info] - should handle empty String
1305[info] - should encode a string
1306[info] FlowOpsZipWithIndexTest:
1307[info] zipWithIndex
1308[info] - should not zip anything from an empty flow
1309[info] - should zip flow with index
1310[info] ResourceTest:
1311[info] useInScope
1312[2025-12-04T13:27:41.070133576Z] [24] allocate
1313[2025-12-04T13:27:41.070693161Z] [550] release 1
1314[info] - should release resources after allocation
1315[2025-12-04T13:27:41.072259625Z] [24] allocate 1
1316[2025-12-04T13:27:41.072999120Z] [24] allocate 2
1317[2025-12-04T13:27:41.073367850Z] [551] release 2
1318[2025-12-04T13:27:41.073463339Z] [551] release 1
1319[info] - should release resources in reverse order
1320[2025-12-04T13:27:41.075435210Z] [24] allocate 1
1321[2025-12-04T13:27:41.075941576Z] [24] allocate 2
1322[2025-12-04T13:27:41.076757463Z] [552] release 2
1323[2025-12-04T13:27:41.076910009Z] [552] release 1
1324[info] - should release resources when there's an exception
1325[2025-12-04T13:27:41.077604078Z] [24] exception
1326[2025-12-04T13:27:41.078925993Z] [24] allocate 1
1327[2025-12-04T13:27:41.079397104Z] [24] allocate 2
1328[2025-12-04T13:27:41.079908985Z] [553] release 2
1329[2025-12-04T13:27:41.080073433Z] [553] release 1
1330[2025-12-04T13:27:41.080379985Z] [24] exception e2
1331[info] - should release resources when there's an exception during releasing (normal result)
1332[2025-12-04T13:27:41.082180255Z] [24] allocate 1
1333[2025-12-04T13:27:41.082672566Z] [24] allocate 2
1334[2025-12-04T13:27:41.082916323Z] [554] release 2
1335[2025-12-04T13:27:41.083388276Z] [554] release 1
1336[2025-12-04T13:27:41.083751636Z] [24] exception e3
1337[info] - should release resources when there's an exception during releasing (exceptional result)
1338[2025-12-04T13:27:41.085214485Z] [24] in scope
1339[2025-12-04T13:27:41.085527641Z] [555] release
1340[info] - should release registered resources
1341[2025-12-04T13:27:41.086657556Z] [24] allocate
1342[2025-12-04T13:27:41.086775637Z] [24] in scope
1343[2025-12-04T13:27:41.087619227Z] [556] release
1344[info] - should use a resource
1345[2025-12-04T13:27:41.088698036Z] [24] allocate
1346[2025-12-04T13:27:41.088792192Z] [24] in scope
1347[2025-12-04T13:27:41.089456606Z] [557] release
1348[info] - should use a closeable resource
1349[2025-12-04T13:27:41.090443564Z] [24] allocate
1350[2025-12-04T13:27:41.090533592Z] [24] in scope
1351[2025-12-04T13:27:41.091331185Z] [558] release
1352[2025-12-04T13:27:41.091880965Z] [24] exception e2 (e1)
1353[info] - should add suppressed exception when there's an exception during releasing
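ResourceTest pins down release ordering and exception behaviour for scope-bound resources: resources are released in reverse allocation order, also on failure, and release-time exceptions are attached as suppressed. A sketch of the `useCloseable` form visible in the warnings above (the `Resource` class and `@main` name are hypothetical):

    import ox.useCloseable

    // hypothetical resource, just to have something closeable to demonstrate with
    class Resource extends AutoCloseable:
      def read(): String = "data"
      def close(): Unit = println("release")

    @main def resourceDemo(): Unit =
      useCloseable(new Resource) { r =>
        println(r.read()) // data
      }                   // prints "release", even if the body had thrown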
1354[info] FlowOpsMapParTest:
1355[info] mapPar
1356[info] - should map over a flow with parallelism limit 1
1357[info] - should map over a flow with parallelism limit 2
1358[info] - should map over a flow with parallelism limit 3
1359[info] - should map over a flow with parallelism limit 4
1360[info] - should map over a flow with parallelism limit 5
1361[info] - should map over a flow with parallelism limit 6
1362[info] - should map over a flow with parallelism limit 7
1363[info] - should map over a flow with parallelism limit 8
1364[info] - should map over a flow with parallelism limit 9
1365[info] - should map over a flow with parallelism limit 10
1366[info] - should map over a flow with parallelism limit 10 (stress test)
1367[info] + iteration 1
1368[info] + iteration 2
1369[info] + iteration 3
1370[info] + iteration 4
1371[info] + iteration 5
1372[info] + iteration 6
1373[info] + iteration 7
1374[info] + iteration 8
1375[info] + iteration 9
1376[info] + iteration 10
1377[info] + iteration 11
1378[info] + iteration 12
1379[info] + iteration 13
1380[info] + iteration 14
1381[info] + iteration 15
1382[info] + iteration 16
1383[info] + iteration 17
1384[info] + iteration 18
1385[info] + iteration 19
1386[info] + iteration 20
1387[info] + iteration 21
1388[info] + iteration 22
1389[info] + iteration 23
1390[info] + iteration 24
1391[info] + iteration 25
1392[info] + iteration 26
1393[info] + iteration 27
1394[info] + iteration 28
1395[info] + iteration 29
1396[info] + iteration 30
1397[info] + iteration 31
1398[info] + iteration 32
1399[info] + iteration 33
1400[info] + iteration 34
1401[info] + iteration 35
1402[info] + iteration 36
1403[info] + iteration 37
1404[info] + iteration 38
1405[info] + iteration 39
1406[info] + iteration 40
1407[info] + iteration 41
1408[info] + iteration 42
1409[info] + iteration 43
1410[info] + iteration 44
1411[info] + iteration 45
1412[info] + iteration 46
1413[info] + iteration 47
1414[info] + iteration 48
1415[info] + iteration 49
1416[info] + iteration 50
1417[info] + iteration 51
1418[info] + iteration 52
1419[info] + iteration 53
1420[info] + iteration 54
1421[info] + iteration 55
1422[info] + iteration 56
1423[info] + iteration 57
1424[info] + iteration 58
1425[info] + iteration 59
1426[info] + iteration 60
1427[info] + iteration 61
1428[info] + iteration 62
1429[info] + iteration 63
1430[info] + iteration 64
1431[info] + iteration 65
1432[info] + iteration 66
1433[info] + iteration 67
1434[info] + iteration 68
1435[info] + iteration 69
1436[info] + iteration 70
1437[info] + iteration 71
1438[info] + iteration 72
1439[info] + iteration 73
1440[info] + iteration 74
1441[info] + iteration 75
1442[info] + iteration 76
1443[info] + iteration 77
1444[info] + iteration 78
1445[info] + iteration 79
1446[info] + iteration 80
1447[info] + iteration 81
1448[info] + iteration 82
1449[info] + iteration 83
1450[info] + iteration 84
1451[info] + iteration 85
1452[info] + iteration 86
1453[info] + iteration 87
1454[info] + iteration 88
1455[info] + iteration 89
1456[info] + iteration 90
1457[info] + iteration 91
1458[info] + iteration 92
1459[info] + iteration 93
1460[info] + iteration 94
1461[info] + iteration 95
1462[info] + iteration 96
1463[info] + iteration 97
1464[info] + iteration 98
1465[info] + iteration 99
1466[info] + iteration 100
1467[info] - should propagate errors
1468[2025-12-04T13:27:49.710952822Z] [1917] done
1469[2025-12-04T13:27:49.710952785Z] [1916] done
1470[2025-12-04T13:27:49.811430887Z] [1919] exception
1471[info] - should cancel other running forks when there's an error
1472[info] - should handle empty flow
1473[info] - should handle flow with exactly parallelism number of elements
1474[info] - should handle flow with less than parallelism number of elements
1475[info] - should preserve order even with varying processing times
1476[info] - should preserve order with random processing times
1477[info] - should work with very high parallelism values
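FlowOpsMapParTest checks the same contract at the flow level: up to `parallelism` elements are transformed concurrently, emission order is preserved regardless of per-element timing, and one failure cancels the other running forks. A minimal sketch, assuming ox's `Flow` API as exercised throughout this log:

    import ox.flow.Flow

    @main def flowMapParDemo(): Unit =
      // up to 4 concurrent transformations; results are emitted in input order
      val out = Flow.fromValues((1 to 10)*).mapPar(4)(i => i * 2).runToList()
      println(out) // List(2, 4, ..., 20)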
1478[info] SelectOrClosedWithinTest:
1479[info] selectOrClosedWithin
1480[info] - should select a clause that can complete immediately
1481[info] - should return timeout when no clause can complete within the timeout
1482[info] - should select a source that has a value immediately
1483[info] - should return timeout when no source has a value within the timeout
1484[info] - should work with different timeout value types
1485[info] - should handle empty clauses sequence
1486[info] - should handle empty sources sequence
1487[info] selectOrClosedWithin with single clause
1488[info] - should complete when clause is ready
1489[info] - should timeout when clause is not ready
1490[info] selectOrClosedWithin with multiple clauses
1491[info] - should select the first ready clause
1492[info] - should timeout when no clauses are ready
1493[info] selectOrClosedWithin with sources
1494[info] - should select from ready source
1495[info] - should timeout when no sources are ready
1496[info] selectOrClosedWithin error scenarios
1497[info] - should handle channel closed with done
1498[info] - should handle channel closed with error
1499[info] - should prioritize ready channels over closed ones
1500[info] selectOrClosedWithin with different timeout types
1501[info] - should work with various timeout value types
1502[info] selectOrClosedWithin with sequences
1503[info] - should handle empty sequences
1504[info] - should handle sequence of clauses
1505[info] - should handle sequence of sources
1506[info] selectOrClosedWithin with various arities
1507[info] - should work with all supported clause counts
1508[info] - should work with all supported source counts
1509[info] FlowOpsSplitTest:
1510[info] split
1511[info] - should split an empty flow
1512[info] - should split a flow with no delimiters
1513[info] - should split a flow with delimiter at the beginning
1514[info] - should split a flow with delimiter at the end
1515[info] - should split a flow with delimiter in the middle
1516[info] - should split a flow with multiple delimiters
1517[info] - should split a flow with adjacent delimiters
1518[info] - should split a flow with only delimiters
1519[info] - should split a flow with single delimiter
1520[info] - should split a flow with single non-delimiter
1521[info] - should split a flow with multiple consecutive delimiters at the beginning
1522[info] - should split a flow with multiple consecutive delimiters at the end
1523[info] - should split a flow with string delimiters
1524[info] - should split a flow using complex predicate
1525[info] - should handle error propagation
1526[info] - should split a large flow efficiently
1527[info] JitterTest:
1528[info] Jitter
1529[info] - should use no jitter
1530[info] - should use full jitter
1531[info] - should use equal jitter
1532[info] - should use decorrelated jitter
1533[info] FlowOpsAlsoToTest:
1534[info] alsoTo
1535[info] - should send to both sinks
1536[info] - should send to both sinks and not hang when other sink is rendezvous channel
1537[info] - should close main flow when other closes
1538[info] - should close main flow with error when other errors
1539[info] - should close other channel with error when main errors
1540[info] FlowOpsBufferTest:
1541[info] buffer
1542[info] - should work with a single async boundary
1543[info] - should work with multiple async boundaries
1544[info] - should propagate errors
1545[info] BackoffRetryTest:
1546[info] Backoff retry
1547[info] - should retry a function
1548[info] - should retry a failing function forever
1549[info] - should respect maximum delay
1550[info] - should use jitter
1551[info] - should retry an Either
1552[info] FlowOpsEnsureTest:
1553[info] ensure.onComplete
1554[info] - should run in case of success
1555[info] - should run in case of error
1556[info] ensure.onDone
1557[info] - should run in case of success
1558[info] - should not run in case of error
1559[info] ensure.onError
1560[info] - should not run in case of success
1561[info] - should run in case of error
1562[info] FlowOpsTakeLastTest:
1563[info] takeLast
1564[info] - should throw ChannelClosedException.Error for a source that failed without an exception
1565[info] - should fail to takeLast when n < 0
1566[info] - should return empty list for the empty source
1567[info] - should return empty list when n == 0 and list is not empty
1568[info] - should return list with all elements if the source is smaller than requested number
1569[info] - should return the last n elements from the source
1570[info] FlowOpsZipAllTest:
1571[info] zipAll
1572[info] - should not emit any element when both flows are empty
1573[info] - should emit this element when other flow is empty
1574[info] - should emit other element when this flow is empty
1575[info] - should emit matching elements when both flows are of the same size
1576[info] - should emit default for other flow if this flow is longer
1577[info] - should emit default for this flow if other flow is longer
1578[info] FlowPublisherTckTest:
1579[info] - required_createPublisher1MustProduceAStreamOfExactly1Element
1580[info] - required_createPublisher3MustProduceAStreamOfExactly3Elements
1581[info] - required_validate_maxElementsFromPublisher
1582[info] - required_validate_boundedDepthOfOnNextAndRequestRecursion
1583[info] - required_spec101_subscriptionRequestMustResultInTheCorrectNumberOfProducedElements
1584[info] - required_spec102_maySignalLessThanRequestedAndTerminateSubscription
1585[info] - stochastic_spec103_mustSignalOnMethodsSequentially
1586[info] - optional_spec104_mustSignalOnErrorWhenFails
1587[info] - required_spec105_mustSignalOnCompleteWhenFiniteStreamTerminates
1588[info] - optional_spec105_emptyStreamMustTerminateBySignallingOnComplete
1589[info] - required_spec107_mustNotEmitFurtherSignalsOnceOnCompleteHasBeenSignalled
1590[info] - untested_spec107_mustNotEmitFurtherSignalsOnceOnErrorHasBeenSignalled !!! IGNORED !!!
1591[info] - untested_spec109_subscribeShouldNotThrowNonFatalThrowable !!! IGNORED !!!
1592[info] - required_spec109_subscribeThrowNPEOnNullSubscriber
1593[info] - required_spec109_mustIssueOnSubscribeForNonNullSubscriber
1594[info] - required_spec109_mayRejectCallsToSubscribeIfPublisherIsUnableOrUnwillingToServeThemRejectionMustTriggerOnErrorAfterOnSubscribe
1595[info] - untested_spec110_rejectASubscriptionRequestIfTheSameSubscriberSubscribesTwice !!! IGNORED !!!
1596[info] - optional_spec111_maySupportMultiSubscribe
1597[info] - optional_spec111_registeredSubscribersMustReceiveOnNextOrOnCompleteSignals
1598[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingOneByOne
1599[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingManyUpfront
1600[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingManyUpfrontAndCompleteAsExpected
1601[info] - required_spec302_mustAllowSynchronousRequestCallsFromOnNextAndOnSubscribe
1602[info] - required_spec303_mustNotAllowUnboundedRecursion
1603[info] - untested_spec304_requestShouldNotPerformHeavyComputations !!! IGNORED !!!
1604[info] - untested_spec305_cancelMustNotSynchronouslyPerformHeavyComputation !!! IGNORED !!!
1605[info] - required_spec306_afterSubscriptionIsCancelledRequestMustBeNops
1606[info] - required_spec307_afterSubscriptionIsCancelledAdditionalCancelationsMustBeNops
1607[info] - required_spec309_requestZeroMustSignalIllegalArgumentException
1608[info] - required_spec309_requestNegativeNumberMustSignalIllegalArgumentException
1609[info] - required_spec312_cancelMustMakeThePublisherToEventuallyStopSignaling
1610[info] - required_spec313_cancelMustMakeThePublisherEventuallyDropAllReferencesToTheSubscriber
1611[info] - required_spec317_mustSupportAPendingElementCountUpToLongMaxValue
1612[info] - required_spec317_mustSupportACumulativePendingElementCountUpToLongMaxValue
1613[info] - required_spec317_mustNotSignalOnErrorWhenPendingAboveLongMaxValue
1614[info] - optional_spec309_requestNegativeNumberMaySignalIllegalArgumentExceptionWithSpecificMessage
1615[info] - untested_spec108_possiblyCanceledSubscriptionShouldNotReceiveOnErrorOrOnCompleteSignals !!! IGNORED !!!
1616[info] - untested_spec106_mustConsiderSubscriptionCancelledAfterOnErrorOrOnCompleteHasBeenCalled !!! IGNORED !!!
1617[info] SourceOpsFactoryMethodsTest:
1618[info] Source factory methods
1619[info] - should create a source from a fork
1620[info] CancelTest:
1621[info] cancel
1622[2025-12-04T13:27:58.019553368Z] [2410] started
1623[2025-12-04T13:27:58.119925082Z] [2410] interrupted
1624[2025-12-04T13:27:58.620357424Z] [2410] interrupted done
1625[2025-12-04T13:27:58.620713275Z] [2408] cancel done
1626[info] - should block until the fork completes
1627[2025-12-04T13:27:59.623550949Z] [2411] cancel done
1628[2025-12-04T13:27:59.725517185Z] [2416] interrupted
1629[2025-12-04T13:27:59.825883273Z] [2416] interrupted done
1630[2025-12-04T13:27:59.826133729Z] [2414] cancel done
1631[2025-12-04T13:27:59.926770786Z] [2417] cancel done
1632[2025-12-04T13:28:00.028619422Z] [2422] interrupted
1633[2025-12-04T13:28:00.128956355Z] [2422] interrupted done
1634[2025-12-04T13:28:00.129147310Z] [2420] cancel done
1635[2025-12-04T13:28:00.229777253Z] [2423] cancel done
1636[2025-12-04T13:28:00.331686147Z] [2428] interrupted
1637[2025-12-04T13:28:00.432059901Z] [2428] interrupted done
1638[2025-12-04T13:28:00.432294891Z] [2426] cancel done
1639[2025-12-04T13:28:00.532906759Z] [2429] cancel done
1640[2025-12-04T13:28:00.634749330Z] [2434] interrupted
1641[2025-12-04T13:28:00.735092907Z] [2434] interrupted done
1642[2025-12-04T13:28:00.735310807Z] [2432] cancel done
1643[2025-12-04T13:28:00.835926511Z] [2435] cancel done
1644[2025-12-04T13:28:00.937811771Z] [2440] interrupted
1645[2025-12-04T13:28:01.038172418Z] [2440] interrupted done
1646[2025-12-04T13:28:01.038389838Z] [2438] cancel done
1647[2025-12-04T13:28:01.139026330Z] [2441] cancel done
1648[2025-12-04T13:28:01.240926291Z] [2446] interrupted
1649[2025-12-04T13:28:01.341276971Z] [2446] interrupted done
1650[2025-12-04T13:28:01.341502991Z] [2444] cancel done
1651[2025-12-04T13:28:01.442102435Z] [2447] cancel done
1652[2025-12-04T13:28:01.543958252Z] [2452] interrupted
1653[2025-12-04T13:28:01.644303782Z] [2452] interrupted done
1654[2025-12-04T13:28:01.644516890Z] [2450] cancel done
1655[2025-12-04T13:28:01.745176598Z] [2453] cancel done
1656[2025-12-04T13:28:01.847029799Z] [2458] interrupted
1657[2025-12-04T13:28:01.947322950Z] [2458] interrupted done
1658[2025-12-04T13:28:01.947522193Z] [2456] cancel done
1659[2025-12-04T13:28:02.048140811Z] [2459] cancel done
1660[2025-12-04T13:28:02.150038730Z] [2464] interrupted
1661[2025-12-04T13:28:02.250411560Z] [2464] interrupted done
1662[2025-12-04T13:28:02.250672809Z] [2462] cancel done
1663[2025-12-04T13:28:02.351348798Z] [2465] cancel done
1664[2025-12-04T13:28:02.453244431Z] [2470] interrupted
1665[2025-12-04T13:28:02.553628199Z] [2470] interrupted done
1666[2025-12-04T13:28:02.553895769Z] [2468] cancel done
1667[info] - should block until the fork completes (stress test)
1668[info] + iteration 1
1669[info] + iteration 2
1670[info] + iteration 3
1671[info] + iteration 4
1672[info] + iteration 5
1673[info] + iteration 6
1674[info] + iteration 7
1675[info] + iteration 8
1676[info] + iteration 9
1677[info] + iteration 10
1678[info] + iteration 11
1679[info] + iteration 12
1680[info] + iteration 13
1681[info] + iteration 14
1682[info] + iteration 15
1683[info] + iteration 16
1684[info] + iteration 17
1685[info] + iteration 18
1686[info] + iteration 19
1687[info] + iteration 20
1688[info] cancelNow
1689[2025-12-04T13:28:02.757891992Z] [2471] cancel done
1690[2025-12-04T13:28:03.258236821Z] [2473] interrupted done
1691[info] - should return immediately, and wait for forks when scope completes
1692[info] - should (when followed by a joinEither) catch InterruptedException with which a fork ends
1693[info] FlowOpsTapTest:
1694[info] - should tap over a flow
1695[info] FlowOpsAlsoToTapTest:
1696[info] alsoToTap
1697[info] - should send to both sinks when other is faster
1698[info] - should send to both sinks when other is slower
1699[info] - should not fail the flow when the other sink fails
1700[info] - should not close the flow when the other sink closes
1701[info] SourceOpsFailedTest:
1702[info] Source.failed
1703[info] - should fail on receive
1704[info] - should be in error
1705[info] FlowOpsDebounceTest:
1706[info] debounce
1707[info] - should not debounce if applied on an empty flow
1708[info] - should not debounce if applied on a flow containing only distinct values
1709[info] - should debounce if applied on a flow containing only repeating values
1710[info] - should debounce if applied on a flow containing repeating elements
1711[info] FlowOpsThrottleTest:
1712[info] throttle
1713[info] - should not throttle the empty source
1714[info] - should throttle to specified elements per time units
1715[info] - should fail to throttle when elements <= 0
1716[info] - should fail to throttle when per lower than 1ms
1717[info] FlowOpsRunToChannelTest:
1718[info] runToChannel
1719[info] - should receive the elements in the flow
1720[info] - should return the original source when running a source-backed flow
1721[info] FlowOpsTimeoutTest:
1722[info] - should timeout
1723[info] FlowOpsZipTest:
1724[info] - should zip two sources
1725[info] FixedRateRepeatTest:
1726[info] repeat
1727[info] - should repeat a function at fixed rate
1728[info] - should repeat a function at fixed rate with initial delay
1729[info] - should repeat a function forever at fixed rate
1730[info] - should repeat a function forever at fixed rate with initial delay
1731[info] ForeachParTest:
1732[info] foreachPar
1733[2025-12-04T13:28:05.390270778Z] [2505] 3
1734[2025-12-04T13:28:05.390237974Z] [2502] 0
1735[2025-12-04T13:28:05.390257260Z] [2503] 1
1736[2025-12-04T13:28:05.390225137Z] [2504] 2
1737[2025-12-04T13:28:05.390317099Z] [2506] 4
1738[2025-12-04T13:28:05.490748771Z] [2509] 7
1739[2025-12-04T13:28:05.490747551Z] [2508] 6
1740[2025-12-04T13:28:05.490732245Z] [2507] 5
1741[2025-12-04T13:28:05.490777567Z] [2510] 8
1742[2025-12-04T13:28:05.490830830Z] [2511] 9
1743[2025-12-04T13:28:05.591222059Z] [2512] 10
1744[2025-12-04T13:28:05.591222102Z] [2513] 11
1745[2025-12-04T13:28:05.591263301Z] [2515] 13
1746[2025-12-04T13:28:05.591253791Z] [2514] 12
1747[2025-12-04T13:28:05.591837163Z] [2516] 14
1748[2025-12-04T13:28:05.691783485Z] [2519] 17
1749[2025-12-04T13:28:05.691765741Z] [2518] 16
1750[2025-12-04T13:28:05.691729897Z] [2517] 15
1751[info] - should run computations in parallel
1752[info] - should run no more computations than the limit
1753[2025-12-04T13:28:06.027110341Z] [2684] exception
1754[2025-12-04T13:28:06.027689821Z] [24] catch
1755[2025-12-04T13:28:06.327877498Z] [24] all done
1756[info] - should interrupt other computations if one fails
1757[info] FlowOpsFutureSourceTest:
1758[info] futureSource
1759[info] - should return the original future failure when future fails
1760[info] - should return future's source values
1761[info] SourceOpsTransformTest:
1762[info] Source.transform
1763[info] - should transform a source using a simple map
1764[info] - should transform a source using a complex chain of operations
1765[info] - should transform an infinite source
1766[info] - should transform an infinite source (stress test)
1767[info] RateLimiterTest:
1768[info] fixed rate RateLimiter
1769[info] - should drop operation when rate limit is exceeded
1770[info] - should restart rate limiter after given duration
1771[info] - should block operation when rate limit is exceeded
1772[info] - should respect time constraints when blocking
1773[info] - should respect time constraints when blocking concurrently
1774[info] - should allow running more long-running operations concurrently than the max rate when not considering operation time
1775[info] - should not allow running more long-running operations concurrently than the max rate when considering operation time
1776[info] sliding window RateLimiter
1777[info] - should drop operation when rate limit is exceeded
1778[info] - should restart rate limiter after given duration
1779[info] - should block operation when rate limit is exceeded
1780[info] - should respect time constraints when blocking
1781[info] - should respect time constraints when blocking concurrently
1782[info] - should not allow running more operations while operations are still running, when considering operation time
1783[info] - should not allow running more operations while operations are still running in the window span, when considering operation time
1784[info] bucket RateLimiter
1785[info] - should drop operation when rate limit is exceeded
1786[info] - should refill token after time elapsed from last refill and not before
1787[info] - should block operation when rate limit is exceeded
1788[info] - should respect time constraints when blocking
1789[info] - should respect time constraints when blocking concurrently
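The three rate limiter variants above differ mainly in how the window is accounted: fixed windows reset wholesale, sliding windows expire entries individually, and the bucket algorithm refills tokens over time. As a neutral illustration of the bucket idea only, not ox's RateLimiter API, a minimal drop-on-exhaustion token bucket:

    import scala.concurrent.duration.*

    // Illustrative token bucket (names hypothetical): refills to maxTokens once
    // per interval; runOrDrop returns None when no token is available.
    final class TokenBucket(maxTokens: Int, per: FiniteDuration):
      private var tokens = maxTokens
      private var lastRefill = System.nanoTime()

      def runOrDrop[T](operation: => T): Option[T] =
        val acquired = synchronized {
          val now = System.nanoTime()
          if now - lastRefill >= per.toNanos then
            tokens = maxTokens
            lastRefill = now
          if tokens > 0 then { tokens -= 1; true }
          else false
        }
        if acquired then Some(operation) else None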
1790[info] FlowOpsSplitOnTest:
1791[info] splitOn
1792[info] - should split an empty flow
1793[info] - should split a flow with no delimiters
1794[info] - should split a flow with single-element delimiter at the beginning
1795[info] - should split a flow with single-element delimiter at the end
1796[info] - should split a flow with single-element delimiter in the middle
1797[info] - should split a flow with multiple single-element delimiters
1798[info] - should split a flow with adjacent single-element delimiters
1799[info] - should split a flow with only single-element delimiters
1800[info] - should split a flow with multi-element delimiter at the beginning
1801[info] - should split a flow with multi-element delimiter at the end
1802[info] - should split a flow with multi-element delimiter in the middle
1803[info] - should split a flow with multiple multi-element delimiters
1804[info] - should split a flow with adjacent multi-element delimiters
1805[info] - should split a flow with only multi-element delimiters
1806[info] - should split a flow with overlapping patterns
1807[info] - should split a flow with complex overlapping patterns
1808[info] - should handle empty delimiter by returning entire input as single chunk
1809[info] - should handle empty delimiter with empty input
1810[info] - should split a flow with string elements
1811[info] - should split a flow with multi-element string delimiter
1812[info] - should handle delimiter longer than input
1813[info] - should handle single element matching start of multi-element delimiter
1814[info] - should handle partial delimiter match at end
1815[info] - should split with delimiter that appears multiple times in sequence
1816[info] - should handle error propagation
1817[info] - should split a large flow efficiently
1818[info] - should handle repeated delimiter pattern correctly
1819[info] - should properly split when given a flow with delimiter patterns
1820[info] - should handle erroneous scenarios when delimiter processing fails
1821[info] FlowOpsMergeTest:
1822[info] merge
1823[info] - should merge two simple flows
1824[info] - should merge two async flows
1825[info] - should merge with a tick flow
1826[info] - should propagate error from the left
1827[info] - should propagate error from the right
1828[info] - should merge two flows, emitting all elements from the left when right completes
1829[info] - should merge two flows, emitting all elements from the right when left completes
1830[info] - should merge two flows, completing the resulting flow when the left flow completes
1831[info] - should merge two flows, completing the resulting flow when the right flow completes
1832[info] FlowOpsFlatMapTest:
1833[info] flatMap
1834[info] - should flatten simple flows
1835[info] - should propagate errors
1836[info] FlowCompanionIOOpsTest:
1837[info] fromInputStream
1838[info] - should handle an empty InputStream
1839[info] - should handle InputStream shorter than buffer size
1840[info] - should handle InputStream longer than buffer size
1841[info] - should close the InputStream after reading it
1842[info] - should close the InputStream after failing with an exception
1843[info] fromFile
1844[info] - should read content from a file smaller than chunk size
1845[info] - should read content from a file larger than chunk size
1846[info] - should handle an empty file
1847[info] - should throw an exception for missing file
1848[info] - should throw an exception if path is a directory
1849[info] CollectParTest:
1850[info] collectPar
1851[info] - should output the same type as input
1852[info] - should run computations in parallel
1853[info] - should run no more computations than the limit
1854[2025-12-04T13:28:50.673050606Z] [5995] exception
1855[2025-12-04T13:28:50.673746566Z] [24] catch
1856[2025-12-04T13:28:50.973910020Z] [24] all done
1857[info] - should interrupt other computations if one fails
1858[info] FlowOpsFutureTest:
1859[info] future
1860[info] - should return the original future failure when future fails
1861[info] - should return future value
1862[info] FlowOpsInterleaveTest:
1863[info] interleave
1864[info] - should interleave with an empty source
1865[info] - should interleave two sources with default segment size
1866[info] - should interleave two sources with default segment size and different lengths
1867[info] - should interleave two sources with custom segment size
1868[info] - should interleave two sources with custom segment size and different lengths
1869[info] - should interleave two sources with different lengths and complete eagerly
1870[info] - should, when empty, interleave with a non-empty source and complete eagerly
1871[info] - should interleave with an empty source and complete eagerly
1872[info] ParTest:
1873[info] par
1874[2025-12-04T13:28:51.093163434Z] [6025] b
1875[2025-12-04T13:28:51.193175700Z] [6024] a
1876[2025-12-04T13:28:51.193475625Z] [24] done
1877[info] - should run computations in parallel
1878[2025-12-04T13:28:51.295370420Z] [6028] exception
1879[2025-12-04T13:28:51.295868777Z] [24] catch
1880[2025-12-04T13:28:51.596085339Z] [24] all done
1881[info] - should interrupt other computations if one fails
1882[info] parLimit
1883[info] - should run up to the given number of computations in parallel
1884[2025-12-04T13:28:52.302125423Z] [6041] x
1885[2025-12-04T13:28:52.302118537Z] [6040] x
1886[2025-12-04T13:28:52.312495766Z] [6043] exception
1887[2025-12-04T13:28:52.312910913Z] [24] catch
1888[2025-12-04T13:28:52.613097348Z] [24] all done
1889[info] - should interrupt other computations if one fails
1890[info] parEither
1891[2025-12-04T13:28:52.714906538Z] [6047] b
1892[2025-12-04T13:28:52.814888176Z] [6046] a
1893[2025-12-04T13:28:52.815644615Z] [24] done
1894[info] - should run computations in parallel
1895[2025-12-04T13:28:52.916918831Z] [6050] exception
1896[2025-12-04T13:28:53.217653086Z] [24] all done
1897[info] - should interrupt other computations if one fails
1898[info] SelectWithinTest:
1899[info] selectWithin
1900[info] - should select a clause that can complete immediately
1901[info] - should throw TimeoutException when no clause can complete within the timeout
1902[info] - should select a source that has a value immediately
1903[info] - should throw TimeoutException when no source has a value within the timeout
1904[info] - should work with single clause
1905[info] - should work with three clauses
1906[info] - should work with four clauses
1907[info] - should work with five clauses
1908[info] - should work with sequence of clauses
1909[info] selectWithin with sources
1910[info] - should work with single source
1911[info] - should work with two sources
1912[info] - should work with three sources
1913[info] - should work with four sources
1914[info] - should work with five sources
1915[info] - should work with sequence of sources
1916[info] selectWithin timeout scenarios
1917[info] - should throw TimeoutException for single clause timeout
1918[info] - should throw TimeoutException for single source timeout
1919[info] - should throw TimeoutException for sequence of clauses timeout
1920[info] - should throw TimeoutException for sequence of sources timeout
1921[info] - should throw TimeoutException immediately for empty sequence of clauses
1922[info] - should throw TimeoutException immediately for empty sequence of sources
1923[info] selectWithin error scenarios
1924[info] - should throw ChannelClosedException when channel is closed with done
1925[info] - should throw ChannelClosedException when channel is closed with error
1926[info] - should prioritize ready channels over closed ones
1927[info] selectWithin performance
1928[info] - should not timeout when clause can complete immediately
1929[info] - should respect timeout duration
1930[info] selectWithin with send clauses
1931[info] - should work with send clauses
1932[info] - should throw TimeoutException when send clauses cannot complete
1933[info] ImmediateRepeatTest:
1934[info] repeat
1935[info] - should repeat a function immediately
1936[info] - should repeat a function immediately with initial delay
1937[info] - should repeat a function immediately forever
1938[info] - should repeat a function immediately forever with initial delay
1939[info] FlowOpsDebounceByTest:
1940[info] debounceBy
1941[info] - should not debounce if applied on an empty flow
1942[info] - should not debounce if applied on a flow containing only distinct f(value)
1943[info] - should debounce if applied on a flow containing repeating f(value)
1944[info] - should debounce subsequent odd/prime numbers
1945[info] RaceTest:
1946[info] timeout
1947[2025-12-04T13:28:54.592739935Z] [24] timeout
1948[2025-12-04T13:28:54.592865771Z] [24] done
1949[info] - should short-circuit a long computation
1950[2025-12-04T13:28:57.094322330Z] [6112] no timeout
1951[2025-12-04T13:28:57.094702512Z] [24] done
1952[info] - should not interrupt a short computation
1953[info] timeoutOption
1954[2025-12-04T13:29:00.096547276Z] [24] done: None
1955[info] - should short-circuit a long computation
1956[info] race
1957[2025-12-04T13:29:02.598546768Z] [6117] fast
1958[info] - should race a slower and faster computation
1959[2025-12-04T13:29:04.101094086Z] [6118] fast
1960[info] - should race a faster and slower computation
1961[2025-12-04T13:29:05.303296531Z] [6120] error
1962[2025-12-04T13:29:05.603322219Z] [6121] slow
1963[info] - should return the first successful computation to complete
1964[info] - should add other exceptions as suppressed
1965[info] - should treat ControlThrowable as a non-fatal exception
1966[info] - should immediately rethrow other fatal exceptions
1967[info] raceEither
1968[2025-12-04T13:29:07.209979541Z] [6131] error
1969[2025-12-04T13:29:07.509972075Z] [6132] slow
1970[info] - should return the first successful computation to complete
1971[info] raceResult
1972[info] - should immediately return when a normal exception occurs
1973[info] - should immediately return when a control exception occurs
1974[info] - should immediately return when a fatal exception occurs
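RaceTest rounds out the direct-style combinators: `timeout` interrupts a computation that overruns its budget, and `race` returns the first computation to complete successfully, interrupting the losers and attaching their failures as suppressed. A short sketch, assuming `race`/`timeout`/`sleep` from the `ox` package as seen in the imports earlier in this log:

    import ox.{race, sleep, timeout}
    import scala.concurrent.duration.*

    @main def raceDemo(): Unit =
      // race: the faster branch wins, the slower one is interrupted
      val winner = race({ sleep(1.second); "slow" }, { sleep(100.millis); "fast" })
      println(winner) // fast

      // timeout: throws when the body does not finish within the budget
      try timeout(100.millis) { sleep(1.second) }
      catch case _: java.util.concurrent.TimeoutException => println("timed out")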
1975[info] SourceOpsFutureSourceTest:
1976[info] SourceOps.futureSource
1977[info] - should return the original future failure when future fails
1978[info] - should return the original future failure when future fails with ExecutionException
1979[info] - should return future's source values
1980[info] FilterParTest:
1981[info] filterPar
1982[info] - should output the same type as input
1983[info] - should run computations in parallel
1984[info] - should run no more computations than the limit
1985[2025-12-04T13:29:09.257967433Z] [6337] exception
1986[2025-12-04T13:29:09.258459266Z] [24] catch
1987[2025-12-04T13:29:09.558665374Z] [24] all done
1988[info] - should interrupt other computations if one fails
1989[info] SourceOpsForeachTest:
1990[info] Source.foreach
1991[info] - should iterate over a source
1992[info] - should iterate over a source using for-syntax
1993[info] - should convert source to a list
1994[info] FlowOpsEmptyTest:
1995[info] empty
1996[info] - should be empty
1997[info] SupervisedTest:
1998[info] supervised
1999[2025-12-04T13:29:09.667719195Z] [6343] b
2000[2025-12-04T13:29:09.767617435Z] [6342] a
2001[2025-12-04T13:29:09.767831869Z] [24] done
2002[info] - should wait until all forks complete
2003[2025-12-04T13:29:09.869452024Z] [6346] b
2004[2025-12-04T13:29:09.869800173Z] [24] done
2005[info] - should only wait until user forks complete
2006[2025-12-04T13:29:09.971585374Z] [6350] b
2007[2025-12-04T13:29:10.072001212Z] [24] done
2008[info] - should interrupt once any fork ends with an exception
2009[2025-12-04T13:29:10.273913339Z] [24] done
2010[info] - should interrupt main body once a fork ends with an exception
2011[2025-12-04T13:29:10.375585412Z] [6356] b
2012[2025-12-04T13:29:10.575317738Z] [6354] a
2013[2025-12-04T13:29:10.575629249Z] [24] done
2014[info] - should not interrupt if an unsupervised fork ends with an exception
2015[info] - should handle interruption of multiple forks with `joinEither` correctly
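A minimal sketch of the structured-concurrency semantics checked above, assuming ox's documented supervised/fork API:

    import ox.{fork, supervised}

    @main def supervisedDemo(): Unit =
      supervised {
        val a = fork { Thread.sleep(200); "a" }
        val b = fork { Thread.sleep(100); "b" }
        // the scope ends only once all user forks complete; if any
        // fork throws, the others are interrupted and the exception
        // is re-thrown from the supervised block
        println(a.join() + b.join())
      }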
2016[info] SourceOpsFutureTest:
2017[info] Source.future
2018[info] - should return the original future failure when future fails
2019[info] - should return the original future failure when future fails with ExecutionException
2020[info] - should return future value
2021[info] CircuitBreakerStateMachineTest:
2022[info] Circuit Breaker state machine
2023[info] - should keep closed with healthy metrics
2024[info] - should go to open after surpassing failure threshold
2025[info] - should go straight to half open after surpassing failure threshold with defined waitDurationOpenState = 0
2026[info] - should go back to open after timeout in half open passed
2027[info] - should update counter of completed operations in halfOpen state
2028[info] - should go back to closed after enough calls with good metrics are recorded
2029[info] - should go to open after enough calls with bad metrics are recorded in halfOpen state
2030[info] - should go to closed after enough calls with good metrics are recorded in halfOpen state
2031[info] - should go to half open after waitDurationOpenState passes
2032[info] OxAppTest:
2033[info] OxApp
2034[info] - should work in happy case
2035[info] OxApp
2036Clean shutdown timed out after 100 milliseconds, exiting.
2037[info] - should shutdown despite cleanup taking a long time
2038[info] OxApp
2039[info] - should work in interrupted case
2040[info] OxApp
2041[info] - should work in failed case
2042[info] OxApp
2043[info] - should report any non-interrupted exceptions that occur during shutdown
2044[info] OxApp.Simple
2045[info] - should work in happy case
2046[info] OxApp.Simple
2047[info] - should work in interrupted case
2048[info] OxApp.Simple
2049[info] - should work in failed case
2050[info] OxApp.WithErrors
2051[info] - should work in happy case
2052[info] OxApp.WithErrors
2053[info] - should work in interrupted case
2054[info] OxApp.WithErrors
2055[info] - should work in failed case
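The OxApp variants tested above wrap main with an enclosing concurrency scope and interruption handling. A minimal sketch, with the run signature assumed from ox's documentation:

    import ox.{ExitCode, Ox, OxApp}

    object MyApp extends OxApp:
      // the given Ox capability scopes any forks to the app's lifetime;
      // interruption (e.g. SIGINT) triggers an orderly scope shutdown
      def run(args: Vector[String])(using Ox): ExitCode =
        println(s"started with: $args")
        ExitCode.Success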
2056[info] FlowOpsMapStatefulConcatTest:
2057[info] mapStatefulConcat
2058[info] - should deduplicate
2059[info] - should count consecutive
2060[info] - should propagate errors in the mapping function
2061[info] - should propagate errors in the completion callback
2062[info] FlowOpsDropTest:
2063[info] drop
2064[info] - should not drop from the empty flow
2065[info] - should drop elements from the source
2066[info] - should return empty source when more elements than the source length were dropped
2067[info] - should not drop when 'n == 0'
2068[info] FlowOpsRepeatEvalTest:
2069[info] repeatEval
2070[info] - should evaluate the element before each send
2071[info] - should evaluate the element before each send, as long as it's defined
2072[info] FlowPublisherPekkoTest:
2073[INFO] [12/04/2025 14:29:11.366] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2074[info] - a simple flow should emit elements to be processed by a pekko stream
2075[INFO] [12/04/2025 14:29:11.615] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2076[info] - a concurrent flow should emit elements to be processed by a pekko stream
2077[INFO] [12/04/2025 14:29:11.655] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2078[info] - create a flow from a simple publisher
2079[INFO] [12/04/2025 14:29:11.937] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2080[info] - create a flow from a concurrent publisher
2081[info] ImmediateRetryTest:
2082[info] Immediate retry
2083[info] - should retry a succeeding function
2084[info] - should fail fast when a function is not worth retrying
2085[info] - should retry a succeeding function with a custom success condition
2086[info] - should retry a failing function
2087[info] - should retry a failing function forever
2088[info] - should retry a succeeding Either
2089[info] - should fail fast when an Either is not worth retrying
2090[info] - should retry a succeeding Either with a custom success condition
2091[info] - should retry a failing Either
2092[info] Adaptive retry with immediate config
2093[info] - should retry a failing adaptive
2094[info] - should stop retrying after emptying bucket
2095[info] - should not pay exceptionCost if result T is going to be retried and shouldPayPenaltyCost returns false
2096[info] FlowOpsGroupByTest:
2097[info] groupBy
2098[info] - should handle empty flow
2099[info] - should handle single-element flow
2100[info] - should handle single-element flow (stress test)
2101[info] - should create simple groups without reaching parallelism limit
2102[info] - should complete groups when the parallelism limit is reached
2103[info] - should not exceed the parallelism limit, completing earliest-active child flows as done when necessary
2104[info] - should handle large flows
2105[info] - should handle non-integer grouping keys
2106[info] - should group when child processing is slow
2107[info] - should propagate errors from child flows
2108[info] - should propagate errors from child flows when the parent is blocked on sending
2109[info] - should propagate RuntimeException errors from parent flows
2110[info] - should throw an IllegalStateException when a child stream is completed by a user-provided transformation
2111[info] FlowOpsLastOptionTest:
2112[info] lastOption
2113[info] - should return None for the empty flow
2114[info] - should return Some for a non-empty flow
2115[info] - should throw ChannelClosedException.Error with exception and message that was thrown during retrieval
2116[info] FlowOpsMapParUnorderedTest:
2117[info] mapParUnordered
2118[info] - should map over a source with parallelism limit 1
2119[info] - should map over a source with parallelism limit 2
2120[info] - should map over a source with parallelism limit 3
2121[info] - should map over a source with parallelism limit 4
2122[info] - should map over a source with parallelism limit 5
2123[info] - should map over a source with parallelism limit 6
2124[info] - should map over a source with parallelism limit 7
2125[info] - should map over a source with parallelism limit 8
2126[info] - should map over a source with parallelism limit 9
2127[info] - should map over a source with parallelism limit 10
2128[info] - should map over a source with parallelism limit 10 (stress test)
2129[info] + iteration 1
2130[info] + iteration 2
2131[info] + iteration 3
2132[info] + iteration 4
2133[info] + iteration 5
2134[info] + iteration 6
2135[info] + iteration 7
2136[info] + iteration 8
2137[info] + iteration 9
2138[info] + iteration 10
2139[info] + iteration 11
2140[info] + iteration 12
2141[info] + iteration 13
2142[info] + iteration 14
2143[info] + iteration 15
2144[info] + iteration 16
2145[info] + iteration 17
2146[info] + iteration 18
2147[info] + iteration 19
2148[info] + iteration 20
2149[info] + iteration 21
2150[info] + iteration 22
2151[info] + iteration 23
2152[info] + iteration 24
2153[info] + iteration 25
2154[info] + iteration 26
2155[info] + iteration 27
2156[info] + iteration 28
2157[info] + iteration 29
2158[info] + iteration 30
2159[info] + iteration 31
2160[info] + iteration 32
2161[info] + iteration 33
2162[info] + iteration 34
2163[info] + iteration 35
2164[info] + iteration 36
2165[info] + iteration 37
2166[info] + iteration 38
2167[info] + iteration 39
2168[info] + iteration 40
2169[info] + iteration 41
2170[info] + iteration 42
2171[info] + iteration 43
2172[info] + iteration 44
2173[info] + iteration 45
2174[info] + iteration 46
2175[info] + iteration 47
2176[info] + iteration 48
2177[info] + iteration 49
2178[info] + iteration 50
2179[info] + iteration 51
2180[info] + iteration 52
2181[info] + iteration 53
2182[info] + iteration 54
2183[info] + iteration 55
2184[info] + iteration 56
2185[info] + iteration 57
2186[info] + iteration 58
2187[info] + iteration 59
2188[info] + iteration 60
2189[info] + iteration 61
2190[info] + iteration 62
2191[info] + iteration 63
2192[info] + iteration 64
2193[info] + iteration 65
2194[info] + iteration 66
2195[info] + iteration 67
2196[info] + iteration 68
2197[info] + iteration 69
2198[info] + iteration 70
2199[info] + iteration 71
2200[info] + iteration 72
2201[info] + iteration 73
2202[info] + iteration 74
2203[info] + iteration 75
2204[info] + iteration 76
2205[info] + iteration 77
2206[info] + iteration 78
2207[info] + iteration 79
2208[info] + iteration 80
2209[info] + iteration 81
2210[info] + iteration 82
2211[info] + iteration 83
2212[info] + iteration 84
2213[info] + iteration 85
2214[info] + iteration 86
2215[info] + iteration 87
2216[info] + iteration 88
2217[info] + iteration 89
2218[info] + iteration 90
2219[info] + iteration 91
2220[info] + iteration 92
2221[info] + iteration 93
2222[info] + iteration 94
2223[info] + iteration 95
2224[info] + iteration 96
2225[info] + iteration 97
2226[info] + iteration 98
2227[info] + iteration 99
2228[info] + iteration 100
2229[info] - should propagate errors
2230[2025-12-04T13:29:27.800450645Z] [208016] done
2231[2025-12-04T13:29:27.800450647Z] [208015] done
2232[2025-12-04T13:29:27.901945095Z] [208018] exception
2233[info] - should complete running forks and not start new ones when the mapping function fails
2234[2025-12-04T13:29:28.205018992Z] [208022] 1
2235[2025-12-04T13:29:28.205014571Z] [208023] 2
2236[info] - should complete running forks and not start new ones when the upstream fails
2237[2025-12-04T13:29:28.619118241Z] [208029] done
2238[2025-12-04T13:29:28.619125838Z] [208030] done
2239[2025-12-04T13:29:28.719614011Z] [208032] exception
2240[info] - should cancel running forks when the surrounding scope closes due to an error
2241[info] - should emit downstream as soon as a value is ready, regardless of the incoming order
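A minimal sketch of mapParUnordered, which the tests above exercise for parallelism limits and error propagation (signature assumed from ox's Flow API):

    import ox.flow.Flow

    @main def mapParUnorderedDemo(): Unit =
      val out = Flow
        .fromIterable(1 to 10)
        // at most 4 concurrent invocations; results are emitted as
        // soon as they are ready, so output order is not guaranteed
        .mapParUnordered(4)(i => { Thread.sleep(10); i * 2 })
        .runToList()
      println(out.sorted) // List(2, 4, ..., 20)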
2242[info] ControlTest:
2243[info] timeout
2244[2025-12-04T13:29:30.226705446Z] [24] timeout
2245[2025-12-04T13:29:30.226833956Z] [24] done
2246[info] - should short-circuit a long computation
2247[info] - should pass through the exception of failed computation
2248[2025-12-04T13:29:32.329400900Z] [208046] no timeout
2249[2025-12-04T13:29:32.329856343Z] [24] done
2250[info] - should not interrupt a short computation
2251[2025-12-04T13:29:34.731369387Z] [208048] done
2252[info] - should block a thread indefinitely
2253[info] timeoutOption
2254[info] - should pass through the exception of failed computation
2255[info] timeoutEither
2256[info] - should pass through the exception of failed computation
2257[info] FlowOpsConcatTest:
2258[info] - should concatenate flows
2259[info] - should concatenate flows using ++
2260[info] - should not evaluate subsequent flows if there's a failure
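Flow concatenation as tested above, in a one-line sketch:

    import ox.flow.Flow

    @main def concatDemo(): Unit =
      // ++ runs the right-hand flow only after the left one completes
      val combined = Flow.fromValues(1, 2) ++ Flow.fromValues(3, 4)
      println(combined.runToList()) // List(1, 2, 3, 4)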
2261[info] FlowOpsFactoryMethodsTest:
2262[info] factory methods
2263[info] - should create a flow from a fork
2264[info] - should create an iterating flow
2265[info] - should unfold a function
2266[info] - should produce a range
2267[info] ChannelTest:
2268[info] channel with capacity 0
2269[info] - should send and receive two spaced elements
2270[info] - should send and receive many elements, with concurrent senders & receivers
2271[info] - should select from two receives, if the last one has elements
2272[info] - should select from three receives, if the last one has elements
2273[info] - should select a receive from multiple channels
2274[info] - should select a receive until all channels are done
2275[info] - should properly report channel state
2276[info] - should select from a non-done channel, if a value is immediately available
2277[info] - should select a done channel, when the channel is done immediately
2278[info] - should select a done channel, when the channel becomes done
2279[info] channel with capacity 1
2280[info] - should send and receive two spaced elements
2281[info] - should send and receive many elements, with concurrent senders & receivers
2282[info] - should select from two receives, if the last one has elements
2283[info] - should select from three receives, if the last one has elements
2284[info] - should select a receive from multiple channels
2285[info] - should select a receive until all channels are done
2286[info] - should properly report channel state
2287[info] - should select from a non-done channel, if a value is immediately available
2288[info] - should select a done channel, when the channel is done immediately
2289[info] - should select a done channel, when the channel becomes done
2290[info] channel with capacity 2
2291[info] - should send and receive two spaced elements
2292[info] - should send and receive many elements, with concurrent senders & receivers
2293[info] - should select from two receives, if the last one has elements
2294[info] - should select from three receives, if the last one has elements
2295[info] - should select a receive from multiple channels
2296[info] - should select a receive until all channels are done
2297[info] - should properly report channel state
2298[info] - should select from a non-done channel, if a value is immediately available
2299[info] - should select a done channel, when the channel is done immediately
2300[info] - should select a done channel, when the channel becomes done
2301[info] channel with capacity 100
2302[info] - should send and receive two spaced elements
2303[info] - should send and receive many elements, with concurrent senders & receivers
2304[info] - should select from two receives, if the last one has elements
2305[info] - should select from three receives, if the last one has elements
2306[info] - should select a receive from multiple channels
2307[info] - should select a receive until all channels are done
2308[info] - should properly report channel state
2309[info] - should select from a non-done channel, if a value is immediately available
2310[info] - should select a done channel, when the channel is done immediately
2311[info] - should select a done channel, when the channel becomes done
2312[info] channel with capacity 10000
2313[info] - should send and receive two spaced elements
2314[info] - should send and receive many elements, with concurrent senders & receivers
2315[info] - should select from two receives, if the last one has elements
2316[info] - should select from three receives, if the last one has elements
2317[info] - should select a receive from multiple channels
2318[info] - should select a receive until all channels are done
2319[info] - should properly report channel state
2320[info] - should select from a non-done channel, if a value is immediately available
2321[info] - should select a done channel, when the channel is done immediately
2322[info] - should select a done channel, when the channel becomes done
2323[info] buffered channel
2324[info] - should select a send when one is available
2325[info] channel
2326[info] - should receive from a channel until done
2327[info] - should not receive from a channel in case of an error
2328[info] rendezvous channel
2329[info] - should wait until elements are transmitted
2330[info] - should select a send when a receive is waiting
2331[info] - should select a send or receive depending on availability
2332[info] default
2333[info] - should use the default value if the clauses are not satisfiable
2334[info] - should not use the default value if a clause is satisfiable
2335[info] - should not use the default value if the channel is done
2336[info] - should use the default value once a source is done (buffered channel, stress test)
2337[info] + iteration 1
2338[info] + iteration 2
2339[info] + iteration 3
2340[info] + iteration 4
2341[info] + iteration 5
2342[info] + iteration 6
2343[info] + iteration 7
2344[info] + iteration 8
2345[info] + iteration 9
2346[info] + iteration 10
2347[info] + iteration 11
2348[info] + iteration 12
2349[info] + iteration 13
2350[info] + iteration 14
2351[info] + iteration 15
2352[info] + iteration 16
2353[info] + iteration 17
2354[info] + iteration 18
2355[info] + iteration 19
2356[info] + iteration 20
2357[info] + iteration 21
2358[info] + iteration 22
2359[info] + iteration 23
2360[info] + iteration 24
2361[info] + iteration 25
2362[info] + iteration 26
2363[info] + iteration 27
2364[info] + iteration 28
2365[info] + iteration 29
2366[info] + iteration 30
2367[info] + iteration 31
2368[info] + iteration 32
2369[info] + iteration 33
2370[info] + iteration 34
2371[info] + iteration 35
2372[info] + iteration 36
2373[info] + iteration 37
2374[info] + iteration 38
2375[info] + iteration 39
2376[info] + iteration 40
2377[info] + iteration 41
2378[info] + iteration 42
2379[info] + iteration 43
2380[info] + iteration 44
2381[info] + iteration 45
2382[info] + iteration 46
2383[info] + iteration 47
2384[info] + iteration 48
2385[info] + iteration 49
2386[info] + iteration 50
2387[info] + iteration 51
2388[info] + iteration 52
2389[info] + iteration 53
2390[info] + iteration 54
2391[info] + iteration 55
2392[info] + iteration 56
2393[info] + iteration 57
2394[info] + iteration 58
2395[info] + iteration 59
2396[info] + iteration 60
2397[info] + iteration 61
2398[info] + iteration 62
2399[info] + iteration 63
2400[info] + iteration 64
2401[info] + iteration 65
2402[info] + iteration 66
2403[info] + iteration 67
2404[info] + iteration 68
2405[info] + iteration 69
2406[info] + iteration 70
2407[info] + iteration 71
2408[info] + iteration 72
2409[info] + iteration 73
2410[info] + iteration 74
2411[info] + iteration 75
2412[info] + iteration 76
2413[info] + iteration 77
2414[info] + iteration 78
2415[info] + iteration 79
2416[info] + iteration 80
2417[info] + iteration 81
2418[info] + iteration 82
2419[info] + iteration 83
2420[info] + iteration 84
2421[info] + iteration 85
2422[info] + iteration 86
2423[info] + iteration 87
2424[info] + iteration 88
2425[info] + iteration 89
2426[info] + iteration 90
2427[info] + iteration 91
2428[info] + iteration 92
2429[info] + iteration 93
2430[info] + iteration 94
2431[info] + iteration 95
2432[info] + iteration 96
2433[info] + iteration 97
2434[info] + iteration 98
2435[info] + iteration 99
2436[info] + iteration 100
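A minimal sketch of the channel operations these suites cover, assuming ox's channels API (buffered/rendezvous factories, send/receive, done); select clauses, also exercised above, are omitted for brevity:

    import ox.{fork, supervised}
    import ox.channels.Channel

    @main def channelDemo(): Unit =
      supervised {
        val c = Channel.buffered[Int](2) // Channel.rendezvous[Int] for capacity 0
        fork {
          c.send(1)
          c.send(2)
          c.done() // no more elements; receivers see the channel as done
        }
        println(c.receive()) // 1
        println(c.receive()) // 2
      }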
2437[info] FlowOpsOrElseTest:
2438[info] orElse
2439[info] - should emit elements only from the original source when it is not empty
2440[info] - should emit elements only from the alternative source when the original source is created empty
2441[info] - should emit elements only from the alternative source when the original source is empty
2442[info] - should return failed source when the original source is failed
2443[info] FlowOpsForeachTest:
2444[info] foreach
2445[info] - should iterate over a flow
2446[info] - should convert flow to a list
2447[info] SourceOpsEmptyTest:
2448[info] Source.empty
2449[info] - should be done
2450[info] - should be empty
2451[info] FlowOpsFlattenParTest:
2452[info] flattenPar
2453[info] - should pipe all elements of the child flows into the output flow
2454[info] - should handle empty flow
2455[info] - should handle singleton flow
2456[info] - should not flatten nested flows
2457[info] - should handle subsequent flatten calls
2458[info] - should run at most parallelism child flows
2459[info] - should pipe elements in real time
2460[info] - should propagate error of any of the child flows and stop piping
2461[info] - should propagate error of the parent flow and stop piping
2462[info] FlowOpsRetryTest:
2463[info] Flow.retry
2464[info] - should successfully run a flow without retries when no errors occur
2465[info] - should retry a failing flow with immediate schedule
2466[info] - should retry a failing flow with fixed interval schedule
2467[info] - should not retry a flow which fails downstream
2468[info] - should fail after exhausting all retry attempts
2469[info] - should use custom ResultPolicy to determine retry worthiness
2470[info] - should handle empty flows correctly
2471[info] - should handle flows that complete successfully on first attempt
2472[info] - should retry the entire flow when processing fails
2473[info] - should work with complex flows containing transformations
2474[info] - should not retry a flow which uses .take and control exceptions
2475[info] LocalTest:
2476[info] fork locals
2477[2025-12-04T13:29:38.993543001Z] [24] main mid
2478[2025-12-04T13:29:39.094333724Z] [313241] In f1 = x
2479[2025-12-04T13:29:39.094582647Z] [24] result = a
2480[2025-12-04T13:29:39.194609734Z] [313243] In f3 = z
2481[2025-12-04T13:29:39.194844962Z] [24] result = a
2482[info] - should properly propagate values using supervisedWhere
2483[2025-12-04T13:29:39.196345791Z] [24] main mid
2484[2025-12-04T13:29:39.296539638Z] [313244] In f1 = x
2485[2025-12-04T13:29:39.296724869Z] [24] result = a
2486[2025-12-04T13:29:39.397280021Z] [313246] In f3 = z
2487[2025-12-04T13:29:39.397523763Z] [24] result = a
2488[info] - should properly propagate values using unsupervisedWhere
2489[2025-12-04T13:29:39.399215525Z] [313248] nested1 = x
2490[2025-12-04T13:29:39.399744037Z] [313249] nested2 = x
2491[info] - should propagate values across multiple scopes
2492[2025-12-04T13:29:39.399861253Z] [24] outer = a
2493[info] - should propagate errors from forks created within local values
2494[2025-12-04T13:29:39.401613489Z] [24] v1
2495[2025-12-04T13:29:39.401986738Z] [24] v2
2496[2025-12-04T13:29:39.402161512Z] [24] RuntimeException
2497[2025-12-04T13:29:39.402244177Z] [24] v1
2498[info] - should correctly set & unset fork locals when an exception is thrown
2499[2025-12-04T13:29:39.402693408Z] [24] v1_1
2500[2025-12-04T13:29:39.402753781Z] [24] v2_1
2501[2025-12-04T13:29:39.403198784Z] [24] v1_2
2502[2025-12-04T13:29:39.403255460Z] [24] v2_2
2503[2025-12-04T13:29:39.403301226Z] [24] v1_1
2504[2025-12-04T13:29:39.403750339Z] [24] v2_1
2505[info] - should correctly set & unset multiple fork locals
2506[info] FlowOpsSampleTest:
2507[info] sample
2508[info] - should not sample anything from an empty flow
2509[info] - should not sample anything when 'n == 0'
2510[info] - should sample every element of the flow when 'n == 1'
2511[info] - should sample every nth element of the flow
2512[info] FlowOpsDrainTest:
2513[info] drain
2514[info] - should drain all elements
2515[info] - should run any side-effects that are part of the flow
2516[info] - should merge with another flow
2517[info] ActorTest:
2518[info] - should invoke methods on the actor
2519[info] - should protect the internal state of the actor
2520[info] - should run the close callback before re-throwing the exception
2521[info] - should end the scope when an exception is thrown when handling .tell
2522[info] - should throw a channel closed exception when the actor's scope becomes closed
2523[info] FlowOpsSlidingTest:
2524[info] sliding
2525[info] - should create sliding windows for n = 2 and step = 1
2526[info] - should create sliding windows for n = 3 and step = 1
2527[info] - should create sliding windows for n = 2 and step = 2
2528[info] - should create sliding windows for n = 3 and step = 2
2529[info] - should create sliding windows for n = 1 and step = 2
2530[info] - should create sliding windows for n = 2 and step = 3
2531[info] - should create sliding windows for n = 2 and step = 3 (with 1 element remaining in the end)
2532[info] - should return failed source when the original source is failed
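A sketch of sliding windows as parameterised in the tests above; the parameter names n and step come from the test names, while the exact signature is an assumption:

    import ox.flow.Flow

    @main def slidingDemo(): Unit =
      // windows of size n, advancing by step elements each time
      val windows = Flow
        .fromValues(1, 2, 3, 4, 5)
        .sliding(n = 2, step = 1)
        .runToList()
      println(windows) // expected: windows (1,2), (2,3), (3,4), (4,5)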
2533[info] FlowOpsTickTest:
2534[info] - should tick regularly
2535[info] - should tick immediately in case of a slow consumer, and then resume normally
2536Starting build for ProjectRef(file:/build/repo/,cron) (cron)... [3/6]
2537Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2538[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.8.0-RC3/classes ...
2539[info] done compiling
2540[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.8.0-RC3/test-classes ...
2541[info] done compiling
2542[info] CronScheduleTest:
2543[info] repeat with cron schedule
2544[info] - should repeat a function every second (once)
2545[info] - should repeat a function every second (three times)
2546[info] - should provide initial delay
2547Starting build for ProjectRef(file:/build/repo/,otelContext) (otel-context)... [4/6]
2548Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2549[info] compiling 1 Scala source to /build/repo/otel-context/target/scala-3.8.0-RC3/classes ...
2550[info] done compiling
2551Starting build for ProjectRef(file:/build/repo/,kafka) (kafka)... [5/6]
2552Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2553[info] compiling 9 Scala sources to /build/repo/kafka/target/scala-3.8.0-RC3/classes ...
2554[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaConsumerWrapper.scala:45:14
2555[warn] 45 | def close(wrapper: KafkaConsumerWrapper[K, V]): Unit = if closeWhenComplete then
2556[warn] | ^^^^^^^
2557[warn] | unused explicit parameter
2558[warn] one warning found
2559[info] done compiling
2560[info] compiling 6 Scala sources to /build/repo/kafka/target/scala-3.8.0-RC3/test-classes ...
2561[info] done compiling
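The E198 warning above flags an unused explicit parameter under -Wunused:explicits. One conventional way to silence it, shown on a hypothetical method mirroring the warning site (not the repo's actual signature), is the standard @unused annotation:

    import scala.annotation.unused

    // hypothetical stand-in for KafkaConsumerWrapper.close; the
    // parameter is kept for the call shape but marked as unused
    def close(@unused wrapper: AnyRef): Unit = ()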
256214:29:51.048 [pool-67-thread-7] INFO o.a.k.c.c.AbstractConfig - KafkaConfig values:
2563 add.partitions.to.txn.retry.backoff.max.ms = 100
2564 add.partitions.to.txn.retry.backoff.ms = 20
2565 advertised.listeners = BROKER://localhost:6001
2566 alter.config.policy.class.name = null
2567 alter.log.dirs.replication.quota.window.num = 11
2568 alter.log.dirs.replication.quota.window.size.seconds = 1
2569 authorizer.class.name =
2570 auto.create.topics.enable = true
2571 auto.leader.rebalance.enable = true
2572 background.threads = 10
2573 broker.heartbeat.interval.ms = 2000
2574 broker.id = 0
2575 broker.rack = null
2576 broker.session.timeout.ms = 9000
2577 client.quota.callback.class = null
2578 compression.gzip.level = -1
2579 compression.lz4.level = 9
2580 compression.type = producer
2581 compression.zstd.level = 3
2582 connection.failed.authentication.delay.ms = 100
2583 connections.max.idle.ms = 600000
2584 connections.max.reauth.ms = 0
2585 controlled.shutdown.enable = true
2586 controller.listener.names = CONTROLLER
2587 controller.performance.always.log.threshold.ms = 2000
2588 controller.performance.sample.period.ms = 60000
2589 controller.quorum.append.linger.ms = 25
2590 controller.quorum.bootstrap.servers = []
2591 controller.quorum.election.backoff.max.ms = 1000
2592 controller.quorum.election.timeout.ms = 1000
2593 controller.quorum.fetch.timeout.ms = 2000
2594 controller.quorum.request.timeout.ms = 2000
2595 controller.quorum.retry.backoff.ms = 20
2596 controller.quorum.voters = [0@localhost:6002]
2597 controller.quota.window.num = 11
2598 controller.quota.window.size.seconds = 1
2599 controller.socket.timeout.ms = 30000
2600 create.topic.policy.class.name = null
2601 default.replication.factor = 1
2602 delegation.token.expiry.check.interval.ms = 3600000
2603 delegation.token.expiry.time.ms = 86400000
2604 delegation.token.max.lifetime.ms = 604800000
2605 delegation.token.secret.key = null
2606 delete.records.purgatory.purge.interval.requests = 1
2607 delete.topic.enable = true
2608 early.start.listeners = null
2609 fetch.max.bytes = 57671680
2610 fetch.purgatory.purge.interval.requests = 1000
2611 group.consumer.assignors = [uniform, range]
2612 group.consumer.heartbeat.interval.ms = 5000
2613 group.consumer.max.heartbeat.interval.ms = 15000
2614 group.consumer.max.session.timeout.ms = 60000
2615 group.consumer.max.size = 2147483647
2616 group.consumer.migration.policy = bidirectional
2617 group.consumer.min.heartbeat.interval.ms = 5000
2618 group.consumer.min.session.timeout.ms = 45000
2619 group.consumer.regex.refresh.interval.ms = 600000
2620 group.consumer.session.timeout.ms = 45000
2621 group.coordinator.append.linger.ms = 5
2622 group.coordinator.rebalance.protocols = [classic, consumer, streams]
2623 group.coordinator.threads = 4
2624 group.initial.rebalance.delay.ms = 3000
2625 group.max.session.timeout.ms = 1800000
2626 group.max.size = 2147483647
2627 group.min.session.timeout.ms = 6000
2628 group.share.assignors = [simple]
2629 group.share.delivery.count.limit = 5
2630 group.share.enable = false
2631 group.share.heartbeat.interval.ms = 5000
2632 group.share.max.heartbeat.interval.ms = 15000
2633 group.share.max.record.lock.duration.ms = 60000
2634 group.share.max.session.timeout.ms = 60000
2635 group.share.max.share.sessions = 2000
2636 group.share.max.size = 200
2637 group.share.min.heartbeat.interval.ms = 5000
2638 group.share.min.record.lock.duration.ms = 15000
2639 group.share.min.session.timeout.ms = 45000
2640 group.share.partition.max.record.locks = 2000
2641 group.share.persister.class.name = org.apache.kafka.server.share.persister.DefaultStatePersister
2642 group.share.record.lock.duration.ms = 30000
2643 group.share.session.timeout.ms = 45000
2644 group.streams.heartbeat.interval.ms = 5000
2645 group.streams.max.heartbeat.interval.ms = 15000
2646 group.streams.max.session.timeout.ms = 60000
2647 group.streams.max.size = 2147483647
2648 group.streams.max.standby.replicas = 2
2649 group.streams.min.heartbeat.interval.ms = 5000
2650 group.streams.min.session.timeout.ms = 45000
2651 group.streams.num.standby.replicas = 0
2652 group.streams.session.timeout.ms = 45000
2653 initial.broker.registration.timeout.ms = 60000
2654 inter.broker.listener.name = BROKER
2655 internal.metadata.delete.delay.millis = 60000
2656 internal.metadata.log.segment.bytes = null
2657 internal.metadata.max.batch.size.in.bytes = 8388608
2658 internal.metadata.max.fetch.size.in.bytes = 8388608
2659 kafka.metrics.polling.interval.secs = 10
2660 kafka.metrics.reporters = []
2661 leader.imbalance.check.interval.seconds = 300
2662 listener.security.protocol.map = BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT
2663 listeners = BROKER://localhost:6001,CONTROLLER://localhost:6002
2664 log.cleaner.backoff.ms = 15000
2665 log.cleaner.dedupe.buffer.size = 1048577
2666 log.cleaner.delete.retention.ms = 86400000
2667 log.cleaner.enable = true
2668 log.cleaner.io.buffer.load.factor = 0.9
2669 log.cleaner.io.buffer.size = 524288
2670 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
2671 log.cleaner.max.compaction.lag.ms = 9223372036854775807
2672 log.cleaner.min.cleanable.ratio = 0.5
2673 log.cleaner.min.compaction.lag.ms = 0
2674 log.cleaner.threads = 1
2675 log.cleanup.policy = [delete]
2676 log.dir = /tmp/kafka-logs
2677 log.dir.failure.timeout.ms = 30000
2678 log.dirs = /tmp/kafka-logs7572554700115704093
2679 log.flush.interval.messages = 1
2680 log.flush.interval.ms = null
2681 log.flush.offset.checkpoint.interval.ms = 60000
2682 log.flush.scheduler.interval.ms = 9223372036854775807
2683 log.flush.start.offset.checkpoint.interval.ms = 60000
2684 log.index.interval.bytes = 4096
2685 log.index.size.max.bytes = 10485760
2686 log.initial.task.delay.ms = 30000
2687 log.local.retention.bytes = -2
2688 log.local.retention.ms = -2
2689 log.message.timestamp.after.max.ms = 3600000
2690 log.message.timestamp.before.max.ms = 9223372036854775807
2691 log.message.timestamp.type = CreateTime
2692 log.preallocate = false
2693 log.retention.bytes = -1
2694 log.retention.check.interval.ms = 300000
2695 log.retention.hours = 168
2696 log.retention.minutes = null
2697 log.retention.ms = null
2698 log.roll.hours = 168
2699 log.roll.jitter.hours = 0
2700 log.roll.jitter.ms = null
2701 log.roll.ms = null
2702 log.segment.bytes = 1073741824
2703 log.segment.delete.delay.ms = 60000
2704 max.connection.creation.rate = 2147483647
2705 max.connections = 2147483647
2706 max.connections.per.ip = 2147483647
2707 max.connections.per.ip.overrides =
2708 max.incremental.fetch.session.cache.slots = 1000
2709 max.request.partition.size.limit = 2000
2710 message.max.bytes = 1048588
2711 metadata.log.dir = null
2712 metadata.log.max.record.bytes.between.snapshots = 20971520
2713 metadata.log.max.snapshot.interval.ms = 3600000
2714 metadata.log.segment.bytes = 1073741824
2715 metadata.log.segment.ms = 604800000
2716 metadata.max.idle.interval.ms = 500
2717 metadata.max.retention.bytes = 104857600
2718 metadata.max.retention.ms = 604800000
2719 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
2720 metrics.num.samples = 2
2721 metrics.recording.level = INFO
2722 metrics.sample.window.ms = 30000
2723 min.insync.replicas = 1
2724 node.id = 0
2725 num.io.threads = 8
2726 num.network.threads = 3
2727 num.partitions = 1
2728 num.recovery.threads.per.data.dir = 2
2729 num.replica.alter.log.dirs.threads = null
2730 num.replica.fetchers = 1
2731 offset.metadata.max.bytes = 4096
2732 offsets.commit.timeout.ms = 5000
2733 offsets.load.buffer.size = 5242880
2734 offsets.retention.check.interval.ms = 600000
2735 offsets.retention.minutes = 10080
2736 offsets.topic.compression.codec = 0
2737 offsets.topic.num.partitions = 1
2738 offsets.topic.replication.factor = 1
2739 offsets.topic.segment.bytes = 104857600
2740 principal.builder.class = class org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
2741 process.roles = [broker, controller]
2742 producer.id.expiration.check.interval.ms = 600000
2743 producer.id.expiration.ms = 86400000
2744 producer.purgatory.purge.interval.requests = 1000
2745 queued.max.request.bytes = -1
2746 queued.max.requests = 500
2747 quota.window.num = 11
2748 quota.window.size.seconds = 1
2749 remote.fetch.max.wait.ms = 500
2750 remote.list.offsets.request.timeout.ms = 30000
2751 remote.log.index.file.cache.total.size.bytes = 1073741824
2752 remote.log.manager.copier.thread.pool.size = 10
2753 remote.log.manager.copy.max.bytes.per.second = 9223372036854775807
2754 remote.log.manager.copy.quota.window.num = 11
2755 remote.log.manager.copy.quota.window.size.seconds = 1
2756 remote.log.manager.expiration.thread.pool.size = 10
2757 remote.log.manager.fetch.max.bytes.per.second = 9223372036854775807
2758 remote.log.manager.fetch.quota.window.num = 11
2759 remote.log.manager.fetch.quota.window.size.seconds = 1
2760 remote.log.manager.task.interval.ms = 30000
2761 remote.log.manager.task.retry.backoff.max.ms = 30000
2762 remote.log.manager.task.retry.backoff.ms = 500
2763 remote.log.manager.task.retry.jitter = 0.2
2764 remote.log.manager.thread.pool.size = 2
2765 remote.log.metadata.custom.metadata.max.bytes = 128
2766 remote.log.metadata.manager.class.name = org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager
2767 remote.log.metadata.manager.class.path = null
2768 remote.log.metadata.manager.impl.prefix = rlmm.config.
2769 remote.log.metadata.manager.listener.name = null
2770 remote.log.reader.max.pending.tasks = 100
2771 remote.log.reader.threads = 10
2772 remote.log.storage.manager.class.name = null
2773 remote.log.storage.manager.class.path = null
2774 remote.log.storage.manager.impl.prefix = rsm.config.
2775 remote.log.storage.system.enable = false
2776 replica.fetch.backoff.ms = 1000
2777 replica.fetch.max.bytes = 1048576
2778 replica.fetch.min.bytes = 1
2779 replica.fetch.response.max.bytes = 10485760
2780 replica.fetch.wait.max.ms = 500
2781 replica.high.watermark.checkpoint.interval.ms = 5000
2782 replica.lag.time.max.ms = 30000
2783 replica.selector.class = null
2784 replica.socket.receive.buffer.bytes = 65536
2785 replica.socket.timeout.ms = 30000
2786 replication.quota.window.num = 11
2787 replication.quota.window.size.seconds = 1
2788 request.timeout.ms = 30000
2789 sasl.client.callback.handler.class = null
2790 sasl.enabled.mechanisms = [GSSAPI]
2791 sasl.jaas.config = null
2792 sasl.kerberos.kinit.cmd = /usr/bin/kinit
2793 sasl.kerberos.min.time.before.relogin = 60000
2794 sasl.kerberos.principal.to.local.rules = [DEFAULT]
2795 sasl.kerberos.service.name = null
2796 sasl.kerberos.ticket.renew.jitter = 0.05
2797 sasl.kerberos.ticket.renew.window.factor = 0.8
2798 sasl.login.callback.handler.class = null
2799 sasl.login.class = null
2800 sasl.login.connect.timeout.ms = null
2801 sasl.login.read.timeout.ms = null
2802 sasl.login.refresh.buffer.seconds = 300
2803 sasl.login.refresh.min.period.seconds = 60
2804 sasl.login.refresh.window.factor = 0.8
2805 sasl.login.refresh.window.jitter = 0.05
2806 sasl.login.retry.backoff.max.ms = 10000
2807 sasl.login.retry.backoff.ms = 100
2808 sasl.mechanism.controller.protocol = GSSAPI
2809 sasl.mechanism.inter.broker.protocol = GSSAPI
2810 sasl.oauthbearer.assertion.algorithm = RS256
2811 sasl.oauthbearer.assertion.claim.aud = null
2812 sasl.oauthbearer.assertion.claim.exp.seconds = 300
2813 sasl.oauthbearer.assertion.claim.iss = null
2814 sasl.oauthbearer.assertion.claim.jti.include = false
2815 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
2816 sasl.oauthbearer.assertion.claim.sub = null
2817 sasl.oauthbearer.assertion.file = null
2818 sasl.oauthbearer.assertion.private.key.file = null
2819 sasl.oauthbearer.assertion.private.key.passphrase = null
2820 sasl.oauthbearer.assertion.template.file = null
2821 sasl.oauthbearer.client.credentials.client.id = null
2822 sasl.oauthbearer.client.credentials.client.secret = null
2823 sasl.oauthbearer.clock.skew.seconds = 30
2824 sasl.oauthbearer.expected.audience = null
2825 sasl.oauthbearer.expected.issuer = null
2826 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
2827 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
2828 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
2829 sasl.oauthbearer.jwks.endpoint.url = null
2830 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
2831 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
2832 sasl.oauthbearer.scope = null
2833 sasl.oauthbearer.scope.claim.name = scope
2834 sasl.oauthbearer.sub.claim.name = sub
2835 sasl.oauthbearer.token.endpoint.url = null
2836 sasl.server.callback.handler.class = null
2837 sasl.server.max.receive.size = 524288
2838 security.inter.broker.protocol = PLAINTEXT
2839 security.providers = null
2840 server.max.startup.time.ms = 9223372036854775807
2841 share.coordinator.append.linger.ms = 5
2842 share.coordinator.cold.partition.snapshot.interval.ms = 300000
2843 share.coordinator.load.buffer.size = 5242880
2844 share.coordinator.snapshot.update.records.per.snapshot = 500
2845 share.coordinator.state.topic.compression.codec = 0
2846 share.coordinator.state.topic.min.isr = 2
2847 share.coordinator.state.topic.num.partitions = 50
2848 share.coordinator.state.topic.prune.interval.ms = 300000
2849 share.coordinator.state.topic.replication.factor = 3
2850 share.coordinator.state.topic.segment.bytes = 104857600
2851 share.coordinator.threads = 1
2852 share.coordinator.write.timeout.ms = 5000
2853 share.fetch.purgatory.purge.interval.requests = 1000
2854 socket.connection.setup.timeout.max.ms = 30000
2855 socket.connection.setup.timeout.ms = 10000
2856 socket.listen.backlog.size = 50
2857 socket.receive.buffer.bytes = 102400
2858 socket.request.max.bytes = 104857600
2859 socket.send.buffer.bytes = 102400
2860 ssl.allow.dn.changes = false
2861 ssl.allow.san.changes = false
2862 ssl.cipher.suites = []
2863 ssl.client.auth = none
2864 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
2865 ssl.endpoint.identification.algorithm = https
2866 ssl.engine.factory.class = null
2867 ssl.key.password = null
2868 ssl.keymanager.algorithm = SunX509
2869 ssl.keystore.certificate.chain = null
2870 ssl.keystore.key = null
2871 ssl.keystore.location = null
2872 ssl.keystore.password = null
2873 ssl.keystore.type = JKS
2874 ssl.principal.mapping.rules = DEFAULT
2875 ssl.protocol = TLSv1.3
2876 ssl.provider = null
2877 ssl.secure.random.implementation = null
2878 ssl.trustmanager.algorithm = PKIX
2879 ssl.truststore.certificates = null
2880 ssl.truststore.location = null
2881 ssl.truststore.password = null
2882 ssl.truststore.type = JKS
2883 telemetry.max.bytes = 1048576
2884 transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
2885 transaction.max.timeout.ms = 900000
2886 transaction.partition.verification.enable = true
2887 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
2888 transaction.state.log.load.buffer.size = 5242880
2889 transaction.state.log.min.isr = 1
2890 transaction.state.log.num.partitions = 50
2891 transaction.state.log.replication.factor = 1
2892 transaction.state.log.segment.bytes = 104857600
2893 transaction.two.phase.commit.enable = false
2894 transactional.id.expiration.ms = 604800000
2895 unclean.leader.election.enable = false
2896 unclean.leader.election.interval.ms = 300000
2897 unstable.api.versions.enable = false
2898 unstable.feature.versions.enable = false
2899
290014:29:51.266 [pool-67-thread-7] INFO k.u.Log4jControllerRegistration$ - Registered `kafka:type=kafka.Log4jController` MBean
290114:29:51.310 [pool-67-thread-7] INFO i.g.e.EmbeddedKafka$ - [KafkaRaftServer nodeId=0] Rewriting /tmp/kafka-logs7572554700115704093/meta.properties
290214:29:51.363 [pool-67-thread-7] INFO k.s.ControllerServer - [ControllerServer id=0] Starting controller
290314:29:51.700 [pool-67-thread-7] INFO k.n.ConnectionQuotas - Updated connection-accept-rate max connection creation rate to 2147483647
290414:29:51.726 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(CONTROLLER)
290514:29:51.731 [pool-67-thread-7] INFO o.a.k.s.n.EndpointReadyFutures - authorizerStart completed for endpoint CONTROLLER. Endpoint is now READY.
290614:29:51.732 [pool-67-thread-7] INFO k.s.SharedServer - [SharedServer id=0] Starting SharedServer
290714:29:51.777 [pool-67-thread-7] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
290814:29:51.778 [pool-67-thread-7] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs7572554700115704093] Reloading from producer snapshot and rebuilding producer state from offset 0
290914:29:51.778 [pool-67-thread-7] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs7572554700115704093] Producer state recovery took 0ms for snapshot load and 0ms for segment recovery from offset 0
291014:29:51.797 [pool-67-thread-7] INFO k.r.KafkaMetadataLog$ - Initialized snapshots with IDs SortedSet() from /tmp/kafka-logs7572554700115704093/__cluster_metadata-0
291114:29:51.808 [raft-expiration-reaper] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Starting
291214:29:51.821 [pool-67-thread-7] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Reading KRaft snapshot and log as part of the initialization
291314:29:51.822 [pool-67-thread-7] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Starting voters are VoterSet(voters={0=VoterNode(voterKey=ReplicaKey(id=0, directoryId=<undefined>), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/127.0.0.1:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:0])})
291414:29:51.826 [pool-67-thread-7] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Starting request manager with static voters: [localhost:6002 (id: 0 rack: null isFenced: false)]
291514:29:51.829 [pool-67-thread-7] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1660, highWatermark=Optional.empty) from null
291614:29:51.903 [pool-67-thread-7] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1660, highWatermark=Optional.empty) from null
291714:29:51.906 [pool-67-thread-7] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1791, highWatermark=Optional.empty) from UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1660, highWatermark=Optional.empty)
291814:29:51.907 [pool-67-thread-7] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to CandidateState(localId=0, localDirectoryId=wZSvsjOHZk681DfKN9_ltw, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1893) from ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1791, highWatermark=Optional.empty)
291914:29:51.913 [pool-67-thread-7] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to CandidateState(localId=0, localDirectoryId=wZSvsjOHZk681DfKN9_ltw, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1893) from ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1791, highWatermark=Optional.empty)
292014:29:51.917 [pool-67-thread-7] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to Leader(localVoterNode=VoterNode(voterKey=ReplicaKey(id=0, directoryId=wZSvsjOHZk681DfKN9_ltw), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/<unresolved>:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:1]), epoch=1, epochStartOffset=0, highWatermark=Optional.empty, voterStates={0=ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional.empty, lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)}) from CandidateState(localId=0, localDirectoryId=wZSvsjOHZk681DfKN9_ltw, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1893)
292114:29:51.919 [pool-67-thread-7] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to Leader(localVoterNode=VoterNode(voterKey=ReplicaKey(id=0, directoryId=wZSvsjOHZk681DfKN9_ltw), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/<unresolved>:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:1]), epoch=1, epochStartOffset=0, highWatermark=Optional.empty, voterStates={0=ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional.empty, lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)}) from CandidateState(localId=0, localDirectoryId=wZSvsjOHZk681DfKN9_ltw, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1893)
292214:29:51.937 [kafka-0-raft-outbound-request-thread] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Starting
292314:29:51.937 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Starting
292414:29:51.951 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] initializeNewPublishers: the loader is still catching up because we still don't know the high water mark yet.
292514:29:51.952 [pool-67-thread-7] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for controller quorum voters future
292614:29:51.952 [pool-67-thread-7] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for controller quorum voters future
292714:29:51.956 [kafka-0-raft-io-thread] INFO o.a.k.r.LeaderState - [RaftManager id=0] High watermark set to LogOffsetMetadata(offset=1, metadata=Optional[(segmentBaseOffset=0,relativePositionInSegment=91)]) for the first time for epoch 1 based on indexOfHw 0 and voters [ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional[LogOffsetMetadata(offset=1, metadata=Optional[(segmentBaseOffset=0,relativePositionInSegment=91)])], lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)]
292814:29:51.962 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Registered the listener org.apache.kafka.image.loader.MetadataLoader@1133527393
292914:29:51.963 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Setting the next offset of org.apache.kafka.image.loader.MetadataLoader@1133527393 to 0 since there are no snapshots
293014:29:51.965 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] maybePublishMetadata(LOG_DELTA): The loader is still catching up because we have not loaded a controller record as of offset 0 and high water mark is 1
293114:29:51.981 [pool-67-thread-7] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task writeNoOpRecord to run every 500 ms
293214:29:51.981 [pool-67-thread-7] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task maybeFenceStaleBroker to run every 1125 ms
293314:29:51.981 [pool-67-thread-7] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task electPreferred to run every 300000 ms
293414:29:51.981 [pool-67-thread-7] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task electUnclean to run every 300000 ms
293514:29:51.981 [pool-67-thread-7] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task expireDelegationTokens to run every 3600000 ms
293614:29:51.982 [pool-67-thread-7] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task generatePeriodicPerformanceMessage to run every 60000 ms
293714:29:51.982 [pool-67-thread-7] INFO o.a.k.c.QuorumController - [QuorumController id=0] Creating new QuorumController with clusterId Rv5ipS8WQ9OWJ9EWetzHMA
293814:29:51.983 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Registered the listener org.apache.kafka.controller.QuorumController$QuorumMetaLogListener@1046441939
293914:29:51.983 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Setting the next offset of org.apache.kafka.controller.QuorumController$QuorumMetaLogListener@1046441939 to 0 since there are no snapshots
294014:29:51.985 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] Becoming the active controller at epoch 1, next write offset 1.
294114:29:51.989 [controller-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Starting
294214:29:51.989 [quorum-controller-0-event-handler] WARN o.a.k.c.QuorumController - [QuorumController id=0] Performing controller activation. The metadata log appears to be empty. Appending 1 bootstrap record(s) in metadata transaction at metadata.version 4.1-IV1 from bootstrap source 'the default bootstrap'.
294314:29:51.990 [controller-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Starting
294414:29:51.990 [controller-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Starting
294514:29:51.992 [controller-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Starting
14:29:51.993 [quorum-controller-0-event-handler] INFO o.a.k.c.OffsetControlManager - [QuorumController id=0] Replayed BeginTransactionRecord(name='Bootstrap records') at offset 1.
14:29:51.994 [quorum-controller-0-event-handler] INFO o.a.k.c.FeatureControlManager - [QuorumController id=0] Replayed a FeatureLevelRecord setting metadata.version to 4.1-IV1
14:29:51.994 [quorum-controller-0-event-handler] INFO o.a.k.c.OffsetControlManager - [QuorumController id=0] Replayed EndTransactionRecord() at offset 3.
14:29:51.995 [quorum-controller-0-event-handler] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Activated periodic tasks: electPreferred, electUnclean, expireDelegationTokens, generatePeriodicPerformanceMessage, maybeFenceStaleBroker, writeNoOpRecord
14:29:52.005 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Starting
14:29:52.017 [pool-67-thread-7] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for the controller metadata publishers to be installed
14:29:52.017 [pool-67-thread-7] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for the controller metadata publishers to be installed
14:29:52.017 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] initializeNewPublishers: The loader is still catching up because we have not loaded a controller record as of offset 0 and high water mark is 1
14:29:52.017 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Enabling request processing.
14:29:52.023 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] maybePublishMetadata(LOG_DELTA): The loader finished catching up to the current high water mark of 4
14:29:52.025 [pool-67-thread-7] INFO k.n.DataPlaneAcceptor - Awaiting socket connections on localhost:6002.
14:29:52.026 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing SnapshotGenerator with a snapshot at offset 3
14:29:52.026 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing KRaftMetadataCachePublisher with a snapshot at offset 3
14:29:52.026 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing FeaturesPublisher with a snapshot at offset 3
14:29:52.034 [pool-67-thread-7] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for all of the authorizer futures to be completed
14:29:52.034 [pool-67-thread-7] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for all of the authorizer futures to be completed
14:29:52.034 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=tHYPO5N7Ql6gyQ8C-tlBnA] initialized channel manager.
14:29:52.034 [pool-67-thread-7] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for all of the SocketServer Acceptors to be started
14:29:52.034 [pool-67-thread-7] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for all of the SocketServer Acceptors to be started
14:29:52.034 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=tHYPO5N7Ql6gyQ8C-tlBnA] maybeSendControllerRegistration: cannot register yet because the metadata.version is not known yet.
14:29:52.035 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Starting
14:29:52.035 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
14:29:52.038 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from SHUTDOWN to STARTING
14:29:52.038 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Starting broker
14:29:52.040 [kafka-0-metadata-loader-event-handler] INFO o.a.k.m.p.FeaturesPublisher - [ControllerServer id=0] Loaded new metadata FinalizedFeatures[metadataVersion=4.1-IV1, finalizedFeatures={metadata.version=27}, finalizedFeaturesEpoch=3].
14:29:52.040 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerRegistrationsPublisher with a snapshot at offset 3
14:29:52.040 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerRegistrationManager with a snapshot at offset 3
14:29:52.041 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicConfigPublisher controller id=0 with a snapshot at offset 3
14:29:52.045 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicClientQuotaPublisher controller id=0 with a snapshot at offset 3
14:29:52.046 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicTopicClusterQuotaPublisher controller id=0 with a snapshot at offset 3
14:29:52.046 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ScramPublisher controller id=0 with a snapshot at offset 3
14:29:52.046 [broker-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Starting
14:29:52.047 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DelegationTokenPublisher controller id=0 with a snapshot at offset 3
14:29:52.048 [broker-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Starting
14:29:52.049 [broker-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Starting
14:29:52.050 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerMetadataMetricsPublisher with a snapshot at offset 3
14:29:52.051 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing AclPublisher controller id=0 with a snapshot at offset 3
14:29:52.051 [broker-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Starting
14:29:52.057 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=tHYPO5N7Ql6gyQ8C-tlBnA] sendControllerRegistration: attempting to send ControllerRegistrationRequestData(controllerId=0, incarnationId=tHYPO5N7Ql6gyQ8C-tlBnA, zkMigrationReady=false, listeners=[Listener(name='CONTROLLER', host='localhost', port=6002, securityProtocol=0)], features=[Feature(name='group.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='transaction.version', minSupportedVersion=0, maxSupportedVersion=2), Feature(name='eligible.leader.replicas.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='kraft.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='metadata.version', minSupportedVersion=7, maxSupportedVersion=27), Feature(name='share.version', minSupportedVersion=0, maxSupportedVersion=1)])
14:29:52.072 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for controller quorum voters future
14:29:52.072 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for controller quorum voters future
14:29:52.075 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Starting
14:29:52.075 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
14:29:52.082 [client-metrics-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Starting
14:29:52.128 [pool-67-thread-7] INFO k.n.ConnectionQuotas - Updated connection-accept-rate max connection creation rate to 2147483647
14:29:52.132 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(BROKER)
14:29:52.138 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Starting
14:29:52.139 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
14:29:52.145 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Starting
14:29:52.150 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
14:29:52.151 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed RegisterControllerRecord containing ControllerRegistration(id=0, incarnationId=tHYPO5N7Ql6gyQ8C-tlBnA, zkMigrationReady=false, listeners=[Endpoint(listenerName='CONTROLLER', securityProtocol=PLAINTEXT, host='localhost', port=6002)], supportedFeatures={eligible.leader.replicas.version: 0-1, group.version: 0-1, kraft.version: 0-1, metadata.version: 7-27, share.version: 0-1, transaction.version: 0-2}).
14:29:52.162 [ExpirationReaper-0-Produce] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Starting
14:29:52.163 [ExpirationReaper-0-Fetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Starting
14:29:52.164 [ExpirationReaper-0-DeleteRecords] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Starting
14:29:52.165 [ExpirationReaper-0-RemoteFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Starting
14:29:52.166 [ExpirationReaper-0-RemoteListOffsets] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Starting
14:29:52.167 [ExpirationReaper-0-ShareFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Starting
14:29:52.171 [controller-0-to-controller-registration-channel-manager] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=tHYPO5N7Ql6gyQ8C-tlBnA] RegistrationResponseHandler: controller acknowledged ControllerRegistrationRequest.
14:29:52.171 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=tHYPO5N7Ql6gyQ8C-tlBnA] Our registration has been persisted to the metadata log.
14:29:52.186 [share-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Starting
14:29:52.203 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Starting
14:29:52.208 [persister-state-manager-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Starting
14:29:52.209 [PersisterStateManager] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Starting
14:29:52.209 [group-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Starting
14:29:52.217 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Starting
14:29:52.217 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Starting
14:29:52.218 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Starting
14:29:52.219 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Starting
14:29:52.235 [pool-67-thread-7] INFO k.l.LogManager - Unable to read the broker epoch in /tmp/kafka-logs7572554700115704093.
14:29:52.237 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Starting
14:29:52.237 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
14:29:52.239 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Incarnation K-jRYEn6SwSTCNAF_kXSxg of broker 0 in cluster Rv5ipS8WQ9OWJ9EWetzHMA is now STARTING.
14:29:52.244 [share-group-lock-timeout-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Starting
14:29:52.247 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] No previous registration found for broker 0. New incarnation ID is K-jRYEn6SwSTCNAF_kXSxg. Generated 0 record(s) to clean up previous incarnations. New broker epoch is 5.
14:29:52.256 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed initial RegisterBrokerRecord for broker 0: RegisterBrokerRecord(brokerId=0, isMigratingZkBroker=false, incarnationId=K-jRYEn6SwSTCNAF_kXSxg, brokerEpoch=5, endPoints=[BrokerEndpoint(name='BROKER', host='localhost', port=6001, securityProtocol=0)], features=[BrokerFeature(name='group.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='transaction.version', minSupportedVersion=0, maxSupportedVersion=2), BrokerFeature(name='eligible.leader.replicas.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='kraft.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='metadata.version', minSupportedVersion=7, maxSupportedVersion=27), BrokerFeature(name='share.version', minSupportedVersion=0, maxSupportedVersion=1)], rack=null, fenced=true, inControlledShutdown=false, logDirs=[wZSvsjOHZk681DfKN9_ltw])
14:29:52.259 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Starting
14:29:52.275 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the broker metadata publishers to be installed
14:29:52.278 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing MetadataVersionPublisher(id=0) with a snapshot at offset 5
14:29:52.278 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing BrokerMetadataPublisher with a snapshot at offset 5
14:29:52.279 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Successfully registered broker 0 with broker epoch 5
14:29:52.279 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the broker metadata publishers to be installed
14:29:52.279 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the controller to acknowledge that we are caught up
14:29:52.281 [kafka-0-metadata-loader-event-handler] INFO k.s.m.BrokerMetadataPublisher - [BrokerMetadataPublisher id=0] Publishing initial metadata at offset OffsetAndEpoch[offset=5, epoch=1] with metadata.version Optional[4.1-IV1].
14:29:52.282 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Loading logs from log dirs ArrayBuffer(/tmp/kafka-logs7572554700115704093)
14:29:52.285 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - No logs found to be loaded in /tmp/kafka-logs7572554700115704093
14:29:52.285 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker has caught up. Transitioning from STARTING to RECOVERY.
14:29:52.285 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the controller to acknowledge that we are caught up
14:29:52.285 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the initial broker metadata update to be published
14:29:52.291 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Loaded 0 logs in 7ms
14:29:52.292 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Starting log cleanup with a period of 300000 ms.
14:29:52.293 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Starting log flusher with a default period of 9223372036854775807 ms.
14:29:52.298 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker is in RECOVERY.
14:29:52.299 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.LogCleaner - Starting the log cleaner
14:29:52.304 [kafka-log-cleaner-thread-0] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Starting
14:29:52.307 [AddPartitionsToTxnSenderThread-0] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Starting
14:29:52.307 [LogDirFailureHandler] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Starting
14:29:52.310 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Starting up.
14:29:52.310 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Startup complete.
14:29:52.311 [kafka-0-metadata-loader-event-handler] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Starting up.
14:29:52.312 [kafka-0-metadata-loader-event-handler] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Startup complete.
14:29:52.312 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Starting up.
14:29:52.312 [TxnMarkerSenderThread-0] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Starting
14:29:52.312 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Startup complete.
14:29:52.317 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the initial broker metadata update to be published
14:29:52.317 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing BrokerRegistrationTracker(id=0) with a snapshot at offset 5
14:29:52.318 [pool-67-thread-7] INFO o.a.k.c.c.AbstractConfig - KafkaConfig values:
  add.partitions.to.txn.retry.backoff.max.ms = 100
  add.partitions.to.txn.retry.backoff.ms = 20
  advertised.listeners = BROKER://localhost:6001
  alter.config.policy.class.name = null
  alter.log.dirs.replication.quota.window.num = 11
  alter.log.dirs.replication.quota.window.size.seconds = 1
  authorizer.class.name =
  auto.create.topics.enable = true
  auto.leader.rebalance.enable = true
  background.threads = 10
  broker.heartbeat.interval.ms = 2000
  broker.id = 0
  broker.rack = null
  broker.session.timeout.ms = 9000
  client.quota.callback.class = null
  compression.gzip.level = -1
  compression.lz4.level = 9
  compression.type = producer
  compression.zstd.level = 3
  connection.failed.authentication.delay.ms = 100
  connections.max.idle.ms = 600000
  connections.max.reauth.ms = 0
  controlled.shutdown.enable = true
  controller.listener.names = CONTROLLER
  controller.performance.always.log.threshold.ms = 2000
  controller.performance.sample.period.ms = 60000
  controller.quorum.append.linger.ms = 25
  controller.quorum.bootstrap.servers = []
  controller.quorum.election.backoff.max.ms = 1000
  controller.quorum.election.timeout.ms = 1000
  controller.quorum.fetch.timeout.ms = 2000
  controller.quorum.request.timeout.ms = 2000
  controller.quorum.retry.backoff.ms = 20
  controller.quorum.voters = [0@localhost:6002]
  controller.quota.window.num = 11
  controller.quota.window.size.seconds = 1
  controller.socket.timeout.ms = 30000
  create.topic.policy.class.name = null
  default.replication.factor = 1
  delegation.token.expiry.check.interval.ms = 3600000
  delegation.token.expiry.time.ms = 86400000
  delegation.token.max.lifetime.ms = 604800000
  delegation.token.secret.key = null
  delete.records.purgatory.purge.interval.requests = 1
  delete.topic.enable = true
  early.start.listeners = null
  fetch.max.bytes = 57671680
  fetch.purgatory.purge.interval.requests = 1000
  group.consumer.assignors = [uniform, range]
  group.consumer.heartbeat.interval.ms = 5000
  group.consumer.max.heartbeat.interval.ms = 15000
  group.consumer.max.session.timeout.ms = 60000
  group.consumer.max.size = 2147483647
  group.consumer.migration.policy = bidirectional
  group.consumer.min.heartbeat.interval.ms = 5000
  group.consumer.min.session.timeout.ms = 45000
  group.consumer.regex.refresh.interval.ms = 600000
  group.consumer.session.timeout.ms = 45000
  group.coordinator.append.linger.ms = 5
  group.coordinator.rebalance.protocols = [classic, consumer, streams]
  group.coordinator.threads = 4
  group.initial.rebalance.delay.ms = 3000
  group.max.session.timeout.ms = 1800000
  group.max.size = 2147483647
  group.min.session.timeout.ms = 6000
  group.share.assignors = [simple]
  group.share.delivery.count.limit = 5
  group.share.enable = false
  group.share.heartbeat.interval.ms = 5000
  group.share.max.heartbeat.interval.ms = 15000
  group.share.max.record.lock.duration.ms = 60000
  group.share.max.session.timeout.ms = 60000
  group.share.max.share.sessions = 2000
  group.share.max.size = 200
  group.share.min.heartbeat.interval.ms = 5000
  group.share.min.record.lock.duration.ms = 15000
  group.share.min.session.timeout.ms = 45000
  group.share.partition.max.record.locks = 2000
  group.share.persister.class.name = org.apache.kafka.server.share.persister.DefaultStatePersister
  group.share.record.lock.duration.ms = 30000
  group.share.session.timeout.ms = 45000
  group.streams.heartbeat.interval.ms = 5000
  group.streams.max.heartbeat.interval.ms = 15000
  group.streams.max.session.timeout.ms = 60000
  group.streams.max.size = 2147483647
  group.streams.max.standby.replicas = 2
  group.streams.min.heartbeat.interval.ms = 5000
  group.streams.min.session.timeout.ms = 45000
  group.streams.num.standby.replicas = 0
  group.streams.session.timeout.ms = 45000
  initial.broker.registration.timeout.ms = 60000
  inter.broker.listener.name = BROKER
  internal.metadata.delete.delay.millis = 60000
  internal.metadata.log.segment.bytes = null
  internal.metadata.max.batch.size.in.bytes = 8388608
  internal.metadata.max.fetch.size.in.bytes = 8388608
  kafka.metrics.polling.interval.secs = 10
  kafka.metrics.reporters = []
  leader.imbalance.check.interval.seconds = 300
  listener.security.protocol.map = BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT
  listeners = BROKER://localhost:6001,CONTROLLER://localhost:6002
  log.cleaner.backoff.ms = 15000
  log.cleaner.dedupe.buffer.size = 1048577
  log.cleaner.delete.retention.ms = 86400000
  log.cleaner.enable = true
  log.cleaner.io.buffer.load.factor = 0.9
  log.cleaner.io.buffer.size = 524288
  log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
  log.cleaner.max.compaction.lag.ms = 9223372036854775807
  log.cleaner.min.cleanable.ratio = 0.5
  log.cleaner.min.compaction.lag.ms = 0
  log.cleaner.threads = 1
  log.cleanup.policy = [delete]
  log.dir = /tmp/kafka-logs
  log.dir.failure.timeout.ms = 30000
  log.dirs = /tmp/kafka-logs7572554700115704093
  log.flush.interval.messages = 1
  log.flush.interval.ms = null
  log.flush.offset.checkpoint.interval.ms = 60000
  log.flush.scheduler.interval.ms = 9223372036854775807
  log.flush.start.offset.checkpoint.interval.ms = 60000
  log.index.interval.bytes = 4096
  log.index.size.max.bytes = 10485760
  log.initial.task.delay.ms = 30000
  log.local.retention.bytes = -2
  log.local.retention.ms = -2
  log.message.timestamp.after.max.ms = 3600000
  log.message.timestamp.before.max.ms = 9223372036854775807
  log.message.timestamp.type = CreateTime
  log.preallocate = false
  log.retention.bytes = -1
  log.retention.check.interval.ms = 300000
  log.retention.hours = 168
  log.retention.minutes = null
  log.retention.ms = null
  log.roll.hours = 168
  log.roll.jitter.hours = 0
  log.roll.jitter.ms = null
  log.roll.ms = null
  log.segment.bytes = 1073741824
  log.segment.delete.delay.ms = 60000
  max.connection.creation.rate = 2147483647
  max.connections = 2147483647
  max.connections.per.ip = 2147483647
  max.connections.per.ip.overrides =
  max.incremental.fetch.session.cache.slots = 1000
  max.request.partition.size.limit = 2000
  message.max.bytes = 1048588
  metadata.log.dir = null
  metadata.log.max.record.bytes.between.snapshots = 20971520
  metadata.log.max.snapshot.interval.ms = 3600000
  metadata.log.segment.bytes = 1073741824
  metadata.log.segment.ms = 604800000
  metadata.max.idle.interval.ms = 500
  metadata.max.retention.bytes = 104857600
  metadata.max.retention.ms = 604800000
  metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
  metrics.num.samples = 2
  metrics.recording.level = INFO
  metrics.sample.window.ms = 30000
  min.insync.replicas = 1
  node.id = 0
  num.io.threads = 8
  num.network.threads = 3
  num.partitions = 1
  num.recovery.threads.per.data.dir = 2
  num.replica.alter.log.dirs.threads = null
  num.replica.fetchers = 1
  offset.metadata.max.bytes = 4096
  offsets.commit.timeout.ms = 5000
  offsets.load.buffer.size = 5242880
  offsets.retention.check.interval.ms = 600000
  offsets.retention.minutes = 10080
  offsets.topic.compression.codec = 0
  offsets.topic.num.partitions = 1
  offsets.topic.replication.factor = 1
  offsets.topic.segment.bytes = 104857600
  principal.builder.class = class org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
  process.roles = [broker, controller]
  producer.id.expiration.check.interval.ms = 600000
  producer.id.expiration.ms = 86400000
  producer.purgatory.purge.interval.requests = 1000
  queued.max.request.bytes = -1
  queued.max.requests = 500
  quota.window.num = 11
  quota.window.size.seconds = 1
  remote.fetch.max.wait.ms = 500
  remote.list.offsets.request.timeout.ms = 30000
  remote.log.index.file.cache.total.size.bytes = 1073741824
  remote.log.manager.copier.thread.pool.size = 10
  remote.log.manager.copy.max.bytes.per.second = 9223372036854775807
  remote.log.manager.copy.quota.window.num = 11
  remote.log.manager.copy.quota.window.size.seconds = 1
  remote.log.manager.expiration.thread.pool.size = 10
  remote.log.manager.fetch.max.bytes.per.second = 9223372036854775807
  remote.log.manager.fetch.quota.window.num = 11
  remote.log.manager.fetch.quota.window.size.seconds = 1
  remote.log.manager.task.interval.ms = 30000
  remote.log.manager.task.retry.backoff.max.ms = 30000
  remote.log.manager.task.retry.backoff.ms = 500
  remote.log.manager.task.retry.jitter = 0.2
  remote.log.manager.thread.pool.size = 2
  remote.log.metadata.custom.metadata.max.bytes = 128
  remote.log.metadata.manager.class.name = org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager
  remote.log.metadata.manager.class.path = null
  remote.log.metadata.manager.impl.prefix = rlmm.config.
  remote.log.metadata.manager.listener.name = null
  remote.log.reader.max.pending.tasks = 100
  remote.log.reader.threads = 10
  remote.log.storage.manager.class.name = null
  remote.log.storage.manager.class.path = null
  remote.log.storage.manager.impl.prefix = rsm.config.
  remote.log.storage.system.enable = false
  replica.fetch.backoff.ms = 1000
  replica.fetch.max.bytes = 1048576
  replica.fetch.min.bytes = 1
  replica.fetch.response.max.bytes = 10485760
  replica.fetch.wait.max.ms = 500
  replica.high.watermark.checkpoint.interval.ms = 5000
  replica.lag.time.max.ms = 30000
  replica.selector.class = null
  replica.socket.receive.buffer.bytes = 65536
  replica.socket.timeout.ms = 30000
  replication.quota.window.num = 11
  replication.quota.window.size.seconds = 1
  request.timeout.ms = 30000
  sasl.client.callback.handler.class = null
  sasl.enabled.mechanisms = [GSSAPI]
  sasl.jaas.config = null
  sasl.kerberos.kinit.cmd = /usr/bin/kinit
  sasl.kerberos.min.time.before.relogin = 60000
  sasl.kerberos.principal.to.local.rules = [DEFAULT]
  sasl.kerberos.service.name = null
  sasl.kerberos.ticket.renew.jitter = 0.05
  sasl.kerberos.ticket.renew.window.factor = 0.8
  sasl.login.callback.handler.class = null
  sasl.login.class = null
  sasl.login.connect.timeout.ms = null
  sasl.login.read.timeout.ms = null
  sasl.login.refresh.buffer.seconds = 300
  sasl.login.refresh.min.period.seconds = 60
  sasl.login.refresh.window.factor = 0.8
  sasl.login.refresh.window.jitter = 0.05
  sasl.login.retry.backoff.max.ms = 10000
  sasl.login.retry.backoff.ms = 100
  sasl.mechanism.controller.protocol = GSSAPI
  sasl.mechanism.inter.broker.protocol = GSSAPI
  sasl.oauthbearer.assertion.algorithm = RS256
  sasl.oauthbearer.assertion.claim.aud = null
  sasl.oauthbearer.assertion.claim.exp.seconds = 300
  sasl.oauthbearer.assertion.claim.iss = null
  sasl.oauthbearer.assertion.claim.jti.include = false
  sasl.oauthbearer.assertion.claim.nbf.seconds = 60
  sasl.oauthbearer.assertion.claim.sub = null
  sasl.oauthbearer.assertion.file = null
  sasl.oauthbearer.assertion.private.key.file = null
  sasl.oauthbearer.assertion.private.key.passphrase = null
  sasl.oauthbearer.assertion.template.file = null
  sasl.oauthbearer.client.credentials.client.id = null
  sasl.oauthbearer.client.credentials.client.secret = null
  sasl.oauthbearer.clock.skew.seconds = 30
  sasl.oauthbearer.expected.audience = null
  sasl.oauthbearer.expected.issuer = null
  sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
  sasl.oauthbearer.jwks.endpoint.url = null
  sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
  sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
  sasl.oauthbearer.scope = null
  sasl.oauthbearer.scope.claim.name = scope
  sasl.oauthbearer.sub.claim.name = sub
  sasl.oauthbearer.token.endpoint.url = null
  sasl.server.callback.handler.class = null
  sasl.server.max.receive.size = 524288
  security.inter.broker.protocol = PLAINTEXT
  security.providers = null
  server.max.startup.time.ms = 9223372036854775807
  share.coordinator.append.linger.ms = 5
  share.coordinator.cold.partition.snapshot.interval.ms = 300000
  share.coordinator.load.buffer.size = 5242880
  share.coordinator.snapshot.update.records.per.snapshot = 500
  share.coordinator.state.topic.compression.codec = 0
  share.coordinator.state.topic.min.isr = 2
  share.coordinator.state.topic.num.partitions = 50
  share.coordinator.state.topic.prune.interval.ms = 300000
  share.coordinator.state.topic.replication.factor = 3
  share.coordinator.state.topic.segment.bytes = 104857600
  share.coordinator.threads = 1
  share.coordinator.write.timeout.ms = 5000
  share.fetch.purgatory.purge.interval.requests = 1000
  socket.connection.setup.timeout.max.ms = 30000
  socket.connection.setup.timeout.ms = 10000
  socket.listen.backlog.size = 50
  socket.receive.buffer.bytes = 102400
  socket.request.max.bytes = 104857600
  socket.send.buffer.bytes = 102400
  ssl.allow.dn.changes = false
  ssl.allow.san.changes = false
  ssl.cipher.suites = []
  ssl.client.auth = none
  ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
  ssl.endpoint.identification.algorithm = https
  ssl.engine.factory.class = null
  ssl.key.password = null
  ssl.keymanager.algorithm = SunX509
  ssl.keystore.certificate.chain = null
  ssl.keystore.key = null
  ssl.keystore.location = null
  ssl.keystore.password = null
  ssl.keystore.type = JKS
  ssl.principal.mapping.rules = DEFAULT
  ssl.protocol = TLSv1.3
  ssl.provider = null
  ssl.secure.random.implementation = null
  ssl.trustmanager.algorithm = PKIX
  ssl.truststore.certificates = null
  ssl.truststore.location = null
  ssl.truststore.password = null
  ssl.truststore.type = JKS
  telemetry.max.bytes = 1048576
  transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
  transaction.max.timeout.ms = 900000
  transaction.partition.verification.enable = true
  transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
  transaction.state.log.load.buffer.size = 5242880
  transaction.state.log.min.isr = 1
  transaction.state.log.num.partitions = 50
  transaction.state.log.replication.factor = 1
  transaction.state.log.segment.bytes = 104857600
  transaction.two.phase.commit.enable = false
  transactional.id.expiration.ms = 604800000
  unclean.leader.election.enable = false
  unclean.leader.election.interval.ms = 300000
  unstable.api.versions.enable = false
  unstable.feature.versions.enable = false

14:29:52.322 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the broker to be unfenced
14:29:52.325 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] The request from broker 0 to unfence has been granted because it has caught up with the offset of its register broker record 5.
14:29:52.330 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=-1, inControlledShutdown=0, logDirs=[])
14:29:52.354 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker has been unfenced. Transitioning from RECOVERY to RUNNING.
14:29:52.354 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the broker to be unfenced
14:29:52.355 [pool-67-thread-7] INFO o.a.k.s.n.EndpointReadyFutures - authorizerStart completed for endpoint BROKER. Endpoint is now READY.
14:29:52.356 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Enabling request processing.
14:29:52.356 [pool-67-thread-7] INFO k.n.DataPlaneAcceptor - Awaiting socket connections on localhost:6001.
14:29:52.357 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for all of the authorizer futures to be completed
14:29:52.357 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for all of the authorizer futures to be completed
14:29:52.357 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for all of the SocketServer Acceptors to be started
14:29:52.357 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for all of the SocketServer Acceptors to be started
14:29:52.357 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from STARTING to STARTED
14:29:52.380 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
  acks = -1
  batch.size = 16384
  bootstrap.servers = [localhost:6001]
  buffer.memory = 33554432
  client.dns.lookup = use_all_dns_ips
  client.id = producer-1
  compression.gzip.level = -1
  compression.lz4.level = 9
  compression.type = none
  compression.zstd.level = 3
  connections.max.idle.ms = 540000
  delivery.timeout.ms = 120000
  enable.idempotence = true
  enable.metrics.push = true
  interceptor.classes = []
  key.serializer = class org.apache.kafka.common.serialization.StringSerializer
  linger.ms = 5
  max.block.ms = 10000
  max.in.flight.requests.per.connection = 5
  max.request.size = 1048576
  metadata.max.age.ms = 300000
  metadata.max.idle.ms = 300000
  metadata.recovery.rebootstrap.trigger.ms = 300000
  metadata.recovery.strategy = rebootstrap
  metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
  metrics.num.samples = 2
  metrics.recording.level = INFO
  metrics.sample.window.ms = 30000
  partitioner.adaptive.partitioning.enable = true
  partitioner.availability.timeout.ms = 0
  partitioner.class = null
  partitioner.ignore.keys = false
  receive.buffer.bytes = 32768
  reconnect.backoff.max.ms = 1000
  reconnect.backoff.ms = 50
  request.timeout.ms = 30000
  retries = 2147483647
  retry.backoff.max.ms = 1000
  retry.backoff.ms = 1000
  sasl.client.callback.handler.class = null
  sasl.jaas.config = null
  sasl.kerberos.kinit.cmd = /usr/bin/kinit
  sasl.kerberos.min.time.before.relogin = 60000
  sasl.kerberos.service.name = null
  sasl.kerberos.ticket.renew.jitter = 0.05
  sasl.kerberos.ticket.renew.window.factor = 0.8
  sasl.login.callback.handler.class = null
  sasl.login.class = null
  sasl.login.connect.timeout.ms = null
  sasl.login.read.timeout.ms = null
  sasl.login.refresh.buffer.seconds = 300
  sasl.login.refresh.min.period.seconds = 60
  sasl.login.refresh.window.factor = 0.8
  sasl.login.refresh.window.jitter = 0.05
  sasl.login.retry.backoff.max.ms = 10000
  sasl.login.retry.backoff.ms = 100
  sasl.mechanism = GSSAPI
  sasl.oauthbearer.assertion.algorithm = RS256
  sasl.oauthbearer.assertion.claim.aud = null
  sasl.oauthbearer.assertion.claim.exp.seconds = 300
  sasl.oauthbearer.assertion.claim.iss = null
  sasl.oauthbearer.assertion.claim.jti.include = false
  sasl.oauthbearer.assertion.claim.nbf.seconds = 60
  sasl.oauthbearer.assertion.claim.sub = null
  sasl.oauthbearer.assertion.file = null
  sasl.oauthbearer.assertion.private.key.file = null
  sasl.oauthbearer.assertion.private.key.passphrase = null
  sasl.oauthbearer.assertion.template.file = null
  sasl.oauthbearer.client.credentials.client.id = null
  sasl.oauthbearer.client.credentials.client.secret = null
  sasl.oauthbearer.clock.skew.seconds = 30
  sasl.oauthbearer.expected.audience = null
  sasl.oauthbearer.expected.issuer = null
  sasl.oauthbearer.header.urlencode = false
  sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
  sasl.oauthbearer.jwks.endpoint.url = null
  sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
  sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
  sasl.oauthbearer.scope = null
  sasl.oauthbearer.scope.claim.name = scope
  sasl.oauthbearer.sub.claim.name = sub
  sasl.oauthbearer.token.endpoint.url = null
  security.protocol = PLAINTEXT
  security.providers = null
  send.buffer.bytes = 131072
  socket.connection.setup.timeout.max.ms = 30000
  socket.connection.setup.timeout.ms = 10000
  ssl.cipher.suites = null
  ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
  ssl.endpoint.identification.algorithm = https
  ssl.engine.factory.class = null
  ssl.key.password = null
  ssl.keymanager.algorithm = SunX509
  ssl.keystore.certificate.chain = null
  ssl.keystore.key = null
  ssl.keystore.location = null
  ssl.keystore.password = null
  ssl.keystore.type = JKS
  ssl.protocol = TLSv1.3
  ssl.provider = null
  ssl.secure.random.implementation = null
  ssl.trustmanager.algorithm = PKIX
  ssl.truststore.certificates = null
  ssl.truststore.location = null
  ssl.truststore.password = null
  ssl.truststore.type = JKS
  transaction.timeout.ms = 60000
  transaction.two.phase.commit.enable = false
  transactional.id = null
  value.serializer = class org.apache.kafka.common.serialization.StringSerializer

14:29:52.404 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:29:52.412 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-1] Instantiated an idempotent producer.
14:29:52.427 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:29:52.427 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:29:52.427 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764854992427
14:29:52.446 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t1) to the active controller.
14:29:52.453 [kafka-producer-network-thread | producer-1] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-1] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t1=UNKNOWN_TOPIC_OR_PARTITION}
14:29:52.454 [kafka-producer-network-thread | producer-1] INFO o.a.k.c.Metadata - [Producer clientId=producer-1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:29:52.464 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
14:29:52.465 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t1 with topic ID WhJh5sHeTC-ZloPVATgk-g.
14:29:52.468 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t1-0 with topic ID WhJh5sHeTC-ZloPVATgk-g and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
14:29:52.494 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
14:29:52.495 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t1-0)
14:29:52.497 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t1-0 with topic id WhJh5sHeTC-ZloPVATgk-g.
14:29:52.500 [quorum-controller-0-event-handler] INFO o.a.k.c.ProducerIdControlManager - [QuorumController id=0] Replaying ProducerIdsRecord ProducerIdsRecord(brokerId=0, brokerEpoch=5, nextProducerId=1000)
14:29:52.510 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t1-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
14:29:52.512 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t1-0 in /tmp/kafka-logs7572554700115704093/t1-0 with properties {}
14:29:52.513 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t1-0 broker=0] No checkpointed highwatermark is found for partition t1-0
14:29:52.514 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t1-0 broker=0] Log loaded for partition t1-0 with initial high watermark 0
14:29:52.519 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t1-0 with topic id Some(WhJh5sHeTC-ZloPVATgk-g) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas []. Previous leader None and previous leader epoch was -1.
14:29:53.461 [kafka-producer-network-thread | producer-1] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-1] ProducerId set to 0 with epoch 0
14:29:53.495 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-1] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:29:53.502 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:29:53.502 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:29:53.502 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:29:53.502 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:29:53.503 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-1 unregistered
14:29:53.504 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
  acks = -1
  batch.size = 16384
  bootstrap.servers = [localhost:6001]
  buffer.memory = 33554432
  client.dns.lookup = use_all_dns_ips
  client.id = producer-2
  compression.gzip.level = -1
  compression.lz4.level = 9
  compression.type = none
  compression.zstd.level = 3
  connections.max.idle.ms = 540000
  delivery.timeout.ms = 120000
  enable.idempotence = true
  enable.metrics.push = true
  interceptor.classes = []
  key.serializer = class org.apache.kafka.common.serialization.StringSerializer
  linger.ms = 5
  max.block.ms = 10000
  max.in.flight.requests.per.connection = 5
  max.request.size = 1048576
  metadata.max.age.ms = 300000
  metadata.max.idle.ms = 300000
  metadata.recovery.rebootstrap.trigger.ms = 300000
  metadata.recovery.strategy = rebootstrap
  metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
  metrics.num.samples = 2
  metrics.recording.level = INFO
  metrics.sample.window.ms = 30000
  partitioner.adaptive.partitioning.enable = true
  partitioner.availability.timeout.ms = 0
  partitioner.class = null
  partitioner.ignore.keys = false
  receive.buffer.bytes = 32768
  reconnect.backoff.max.ms = 1000
  reconnect.backoff.ms = 50
  request.timeout.ms = 30000
  retries = 2147483647
  retry.backoff.max.ms = 1000
  retry.backoff.ms = 1000
  sasl.client.callback.handler.class = null
  sasl.jaas.config = null
  sasl.kerberos.kinit.cmd = /usr/bin/kinit
  sasl.kerberos.min.time.before.relogin = 60000
  sasl.kerberos.service.name = null
  sasl.kerberos.ticket.renew.jitter = 0.05
  sasl.kerberos.ticket.renew.window.factor = 0.8
  sasl.login.callback.handler.class = null
  sasl.login.class = null
  sasl.login.connect.timeout.ms = null
  sasl.login.read.timeout.ms = null
  sasl.login.refresh.buffer.seconds = 300
  sasl.login.refresh.min.period.seconds = 60
  sasl.login.refresh.window.factor = 0.8
  sasl.login.refresh.window.jitter = 0.05
  sasl.login.retry.backoff.max.ms = 10000
  sasl.login.retry.backoff.ms = 100
  sasl.mechanism = GSSAPI
  sasl.oauthbearer.assertion.algorithm = RS256
  sasl.oauthbearer.assertion.claim.aud = null
  sasl.oauthbearer.assertion.claim.exp.seconds = 300
  sasl.oauthbearer.assertion.claim.iss = null
  sasl.oauthbearer.assertion.claim.jti.include = false
  sasl.oauthbearer.assertion.claim.nbf.seconds = 60
  sasl.oauthbearer.assertion.claim.sub = null
  sasl.oauthbearer.assertion.file = null
  sasl.oauthbearer.assertion.private.key.file = null
  sasl.oauthbearer.assertion.private.key.passphrase = null
  sasl.oauthbearer.assertion.template.file = null
  sasl.oauthbearer.client.credentials.client.id = null
  sasl.oauthbearer.client.credentials.client.secret = null
  sasl.oauthbearer.clock.skew.seconds = 30
  sasl.oauthbearer.expected.audience = null
  sasl.oauthbearer.expected.issuer = null
  sasl.oauthbearer.header.urlencode = false
  sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
  sasl.oauthbearer.jwks.endpoint.url = null
  sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
  sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
  sasl.oauthbearer.scope = null
  sasl.oauthbearer.scope.claim.name = scope
  sasl.oauthbearer.sub.claim.name = sub
  sasl.oauthbearer.token.endpoint.url = null
  security.protocol = PLAINTEXT
  security.providers = null
  send.buffer.bytes = 131072
  socket.connection.setup.timeout.max.ms = 30000
  socket.connection.setup.timeout.ms = 10000
  ssl.cipher.suites = null
  ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
  ssl.endpoint.identification.algorithm = https
  ssl.engine.factory.class = null
  ssl.key.password = null
  ssl.keymanager.algorithm = SunX509
  ssl.keystore.certificate.chain = null
  ssl.keystore.key = null
  ssl.keystore.location = null
  ssl.keystore.password = null
  ssl.keystore.type = JKS
  ssl.protocol = TLSv1.3
  ssl.provider = null
  ssl.secure.random.implementation = null
  ssl.trustmanager.algorithm = PKIX
  ssl.truststore.certificates = null
  ssl.truststore.location = null
  ssl.truststore.password = null
  ssl.truststore.type = JKS
  transaction.timeout.ms = 60000
  transaction.two.phase.commit.enable = false
  transactional.id = null
  value.serializer = class org.apache.kafka.common.serialization.StringSerializer

14:29:53.505 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:29:53.505 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-2] Instantiated an idempotent producer.
14:29:53.510 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:29:53.510 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:29:53.510 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764854993510
14:29:53.516 [kafka-producer-network-thread | producer-2] INFO o.a.k.c.Metadata - [Producer clientId=producer-2] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:29:53.517 [kafka-producer-network-thread | producer-2] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-2] ProducerId set to 1 with epoch 0
14:29:53.529 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-2] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:29:53.533 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:29:53.533 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:29:53.533 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:29:53.533 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:29:53.533 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-2 unregistered
14:29:53.534 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
  acks = -1
  batch.size = 16384
  bootstrap.servers = [localhost:6001]
  buffer.memory = 33554432
  client.dns.lookup = use_all_dns_ips
  client.id = producer-3
  compression.gzip.level = -1
  compression.lz4.level = 9
  compression.type = none
  compression.zstd.level = 3
  connections.max.idle.ms = 540000
  delivery.timeout.ms = 120000
  enable.idempotence = true
  enable.metrics.push = true
  interceptor.classes = []
  key.serializer = class org.apache.kafka.common.serialization.StringSerializer
  linger.ms = 5
  max.block.ms = 10000
  max.in.flight.requests.per.connection = 5
  max.request.size = 1048576
  metadata.max.age.ms = 300000
  metadata.max.idle.ms = 300000
  metadata.recovery.rebootstrap.trigger.ms = 300000
  metadata.recovery.strategy = rebootstrap
  metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
  metrics.num.samples = 2
  metrics.recording.level = INFO
  metrics.sample.window.ms = 30000
  partitioner.adaptive.partitioning.enable = true
  partitioner.availability.timeout.ms = 0
  partitioner.class = null
  partitioner.ignore.keys = false
  receive.buffer.bytes = 32768
  reconnect.backoff.max.ms = 1000
  reconnect.backoff.ms = 50
  request.timeout.ms = 30000
  retries = 2147483647
  retry.backoff.max.ms = 1000
  retry.backoff.ms = 1000
  sasl.client.callback.handler.class = null
  sasl.jaas.config = null
  sasl.kerberos.kinit.cmd = /usr/bin/kinit
  sasl.kerberos.min.time.before.relogin = 60000
  sasl.kerberos.service.name = null
  sasl.kerberos.ticket.renew.jitter = 0.05
  sasl.kerberos.ticket.renew.window.factor = 0.8
  sasl.login.callback.handler.class = null
  sasl.login.class = null
  sasl.login.connect.timeout.ms = null
  sasl.login.read.timeout.ms = null
  sasl.login.refresh.buffer.seconds = 300
  sasl.login.refresh.min.period.seconds = 60
  sasl.login.refresh.window.factor = 0.8
  sasl.login.refresh.window.jitter = 0.05
  sasl.login.retry.backoff.max.ms = 10000
  sasl.login.retry.backoff.ms = 100
  sasl.mechanism = GSSAPI
  sasl.oauthbearer.assertion.algorithm = RS256
  sasl.oauthbearer.assertion.claim.aud = null
  sasl.oauthbearer.assertion.claim.exp.seconds = 300
  sasl.oauthbearer.assertion.claim.iss = null
  sasl.oauthbearer.assertion.claim.jti.include = false
  sasl.oauthbearer.assertion.claim.nbf.seconds = 60
  sasl.oauthbearer.assertion.claim.sub = null
  sasl.oauthbearer.assertion.file = null
  sasl.oauthbearer.assertion.private.key.file = null
  sasl.oauthbearer.assertion.private.key.passphrase = null
  sasl.oauthbearer.assertion.template.file = null
  sasl.oauthbearer.client.credentials.client.id = null
  sasl.oauthbearer.client.credentials.client.secret = null
  sasl.oauthbearer.clock.skew.seconds = 30
  sasl.oauthbearer.expected.audience = null
  sasl.oauthbearer.expected.issuer = null
  sasl.oauthbearer.header.urlencode = false
  sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
  sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
  sasl.oauthbearer.jwks.endpoint.url = null
  sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
  sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
  sasl.oauthbearer.scope = null
  sasl.oauthbearer.scope.claim.name = scope
  sasl.oauthbearer.sub.claim.name = sub
  sasl.oauthbearer.token.endpoint.url = null
  security.protocol = PLAINTEXT
  security.providers = null
  send.buffer.bytes = 131072
  socket.connection.setup.timeout.max.ms = 30000
  socket.connection.setup.timeout.ms = 10000
  ssl.cipher.suites = null
  ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
  ssl.endpoint.identification.algorithm = https
  ssl.engine.factory.class = null
  ssl.key.password = null
  ssl.keymanager.algorithm = SunX509
  ssl.keystore.certificate.chain = null
  ssl.keystore.key = null
  ssl.keystore.location = null
  ssl.keystore.password = null
  ssl.keystore.type = JKS
  ssl.protocol = TLSv1.3
  ssl.provider = null
  ssl.secure.random.implementation = null
  ssl.trustmanager.algorithm = PKIX
  ssl.truststore.certificates = null
  ssl.truststore.location = null
  ssl.truststore.password = null
  ssl.truststore.type = JKS
  transaction.timeout.ms = 60000
  transaction.two.phase.commit.enable = false
  transactional.id = null
  value.serializer = class org.apache.kafka.common.serialization.StringSerializer

378414:29:53.534 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
378514:29:53.535 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-3] Instantiated an idempotent producer.
378614:29:53.540 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
378714:29:53.541 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
378814:29:53.541 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764854993540
378914:29:53.549 [kafka-producer-network-thread | producer-3] INFO o.a.k.c.Metadata - [Producer clientId=producer-3] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
379014:29:53.550 [kafka-producer-network-thread | producer-3] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-3] ProducerId set to 2 with epoch 0
379114:29:53.562 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-3] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
379214:29:53.567 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
379314:29:53.567 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
379414:29:53.567 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
379514:29:53.567 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
379614:29:53.567 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-3 unregistered
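The producer-3 lifecycle above (config dump, idempotent-producer instantiation, metadata fetch, close, metrics teardown) is what a single publish from the test looks like at the client level. A minimal sketch of an equivalent plain-client producer against the embedded broker on localhost:6001; an illustration matching the dumped config, not the test's actual code:

  import java.util.Properties
  import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

  val props = new Properties()
  props.put("bootstrap.servers", "localhost:6001")
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

  // enable.idempotence defaults to true, hence "Instantiated an idempotent producer"
  val producer = new KafkaProducer[String, String](props)
  try producer.send(new ProducerRecord("t1", "key", "value")).get() // block until acked
  finally producer.close() // emits the "Closing the Kafka producer" lines seen above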
379714:29:53.586 [virtual-608] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
3798 allow.auto.create.topics = true
3799 auto.commit.interval.ms = 5000
3800 auto.offset.reset = earliest
3801 bootstrap.servers = [localhost:6001]
3802 check.crcs = true
3803 client.dns.lookup = use_all_dns_ips
3804 client.id = consumer-g1-1
3805 client.rack =
3806 connections.max.idle.ms = 540000
3807 default.api.timeout.ms = 60000
3808 enable.auto.commit = false
3809 enable.metrics.push = true
3810 exclude.internal.topics = true
3811 fetch.max.bytes = 52428800
3812 fetch.max.wait.ms = 500
3813 fetch.min.bytes = 1
3814 group.id = g1
3815 group.instance.id = null
3816 group.protocol = classic
3817 group.remote.assignor = null
3818 heartbeat.interval.ms = 3000
3819 interceptor.classes = []
3820 internal.leave.group.on.close = true
3821 internal.throw.on.fetch.stable.offset.unsupported = false
3822 isolation.level = read_uncommitted
3823 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
3824 max.partition.fetch.bytes = 1048576
3825 max.poll.interval.ms = 300000
3826 max.poll.records = 500
3827 metadata.max.age.ms = 300000
3828 metadata.recovery.rebootstrap.trigger.ms = 300000
3829 metadata.recovery.strategy = rebootstrap
3830 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3831 metrics.num.samples = 2
3832 metrics.recording.level = INFO
3833 metrics.sample.window.ms = 30000
3834 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
3835 receive.buffer.bytes = 65536
3836 reconnect.backoff.max.ms = 1000
3837 reconnect.backoff.ms = 50
3838 request.timeout.ms = 30000
3839 retry.backoff.max.ms = 1000
3840 retry.backoff.ms = 100
3841 sasl.client.callback.handler.class = null
3842 sasl.jaas.config = null
3843 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3844 sasl.kerberos.min.time.before.relogin = 60000
3845 sasl.kerberos.service.name = null
3846 sasl.kerberos.ticket.renew.jitter = 0.05
3847 sasl.kerberos.ticket.renew.window.factor = 0.8
3848 sasl.login.callback.handler.class = null
3849 sasl.login.class = null
3850 sasl.login.connect.timeout.ms = null
3851 sasl.login.read.timeout.ms = null
3852 sasl.login.refresh.buffer.seconds = 300
3853 sasl.login.refresh.min.period.seconds = 60
3854 sasl.login.refresh.window.factor = 0.8
3855 sasl.login.refresh.window.jitter = 0.05
3856 sasl.login.retry.backoff.max.ms = 10000
3857 sasl.login.retry.backoff.ms = 100
3858 sasl.mechanism = GSSAPI
3859 sasl.oauthbearer.assertion.algorithm = RS256
3860 sasl.oauthbearer.assertion.claim.aud = null
3861 sasl.oauthbearer.assertion.claim.exp.seconds = 300
3862 sasl.oauthbearer.assertion.claim.iss = null
3863 sasl.oauthbearer.assertion.claim.jti.include = false
3864 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
3865 sasl.oauthbearer.assertion.claim.sub = null
3866 sasl.oauthbearer.assertion.file = null
3867 sasl.oauthbearer.assertion.private.key.file = null
3868 sasl.oauthbearer.assertion.private.key.passphrase = null
3869 sasl.oauthbearer.assertion.template.file = null
3870 sasl.oauthbearer.client.credentials.client.id = null
3871 sasl.oauthbearer.client.credentials.client.secret = null
3872 sasl.oauthbearer.clock.skew.seconds = 30
3873 sasl.oauthbearer.expected.audience = null
3874 sasl.oauthbearer.expected.issuer = null
3875 sasl.oauthbearer.header.urlencode = false
3876 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
3877 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
3878 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
3879 sasl.oauthbearer.jwks.endpoint.url = null
3880 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
3881 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
3882 sasl.oauthbearer.scope = null
3883 sasl.oauthbearer.scope.claim.name = scope
3884 sasl.oauthbearer.sub.claim.name = sub
3885 sasl.oauthbearer.token.endpoint.url = null
3886 security.protocol = PLAINTEXT
3887 security.providers = null
3888 send.buffer.bytes = 131072
3889 session.timeout.ms = 45000
3890 share.acknowledgement.mode = implicit
3891 socket.connection.setup.timeout.max.ms = 30000
3892 socket.connection.setup.timeout.ms = 10000
3893 ssl.cipher.suites = null
3894 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
3895 ssl.endpoint.identification.algorithm = https
3896 ssl.engine.factory.class = null
3897 ssl.key.password = null
3898 ssl.keymanager.algorithm = SunX509
3899 ssl.keystore.certificate.chain = null
3900 ssl.keystore.key = null
3901 ssl.keystore.location = null
3902 ssl.keystore.password = null
3903 ssl.keystore.type = JKS
3904 ssl.protocol = TLSv1.3
3905 ssl.provider = null
3906 ssl.secure.random.implementation = null
3907 ssl.trustmanager.algorithm = PKIX
3908 ssl.truststore.certificates = null
3909 ssl.truststore.location = null
3910 ssl.truststore.password = null
3911 ssl.truststore.type = JKS
3912 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
3913
391414:29:53.595 [virtual-608] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
391514:29:53.638 [virtual-608] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
391614:29:53.638 [virtual-608] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
391714:29:53.638 [virtual-608] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764854993638
391814:29:53.663 [virtual-615] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g1-1, groupId=g1] Subscribed to topic(s): t1
391914:29:53.671 [virtual-615] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g1-1, groupId=g1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
392014:29:53.673 [data-plane-kafka-request-handler-5] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(__consumer_offsets) to the active controller.
392114:29:53.678 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='__consumer_offsets', numPartitions=1, replicationFactor=1, assignments=[], configs=[CreatableTopicConfig(name='compression.type', value='producer'), CreatableTopicConfig(name='cleanup.policy', value='compact'), CreatableTopicConfig(name='segment.bytes', value='104857600')]): SUCCESS
392214:29:53.679 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic __consumer_offsets with topic ID nCn9WLKxSD2Uls0Rj9SfFw.
392314:29:53.680 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration compression.type to producer
392414:29:53.680 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration cleanup.policy to compact
392514:29:53.680 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration segment.bytes to 104857600
392614:29:53.680 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition __consumer_offsets-0 with topic ID nCn9WLKxSD2Uls0Rj9SfFw and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
392714:29:53.708 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
392814:29:53.708 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(__consumer_offsets-0)
392914:29:53.708 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition __consumer_offsets-0 with topic id nCn9WLKxSD2Uls0Rj9SfFw.
393014:29:53.711 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__consumer_offsets-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
393114:29:53.711 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition __consumer_offsets-0 in /tmp/kafka-logs7572554700115704093/__consumer_offsets-0 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600}
393214:29:53.711 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition __consumer_offsets-0 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-0
393314:29:53.711 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition __consumer_offsets-0 broker=0] Log loaded for partition __consumer_offsets-0 with initial high watermark 0
393414:29:53.712 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader __consumer_offsets-0 with topic id Some(nCn9WLKxSD2Uls0Rj9SfFw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
393514:29:53.714 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Scheduling loading of metadata from __consumer_offsets-0 with epoch 0
393614:29:53.721 [kafka-0-metadata-loader-event-handler] INFO k.s.m.DynamicConfigPublisher - [DynamicConfigPublisher broker id=0] Updating topic __consumer_offsets with new configuration : compression.type -> producer,cleanup.policy -> compact,segment.bytes -> 104857600
393714:29:53.735 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Finished loading of metadata from __consumer_offsets-0 with epoch 0 in 1ms where 1ms was spent in the scheduler. Loaded 0 records which total to 0 bytes.
393814:29:53.769 [virtual-615] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
393914:29:53.771 [virtual-615] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] (Re-)joining group
394014:29:53.781 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g1 in Empty state. Created a new member id consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1 and requesting the member to rejoin with this id.
394114:29:53.783 [virtual-615] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Request joining group due to: need to re-join with the given member-id: consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1
394214:29:53.784 [virtual-615] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] (Re-)joining group
394314:29:53.788 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1 joins group g1 in Empty state. Adding to the group now.
394414:29:53.789 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1 with group instance id null; client reason: need to re-join with the given member-id: consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1).
394514:29:56.791 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g1 generation 1 with 1 members.
394614:29:56.794 [virtual-615] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1', protocol='range'}
394714:29:56.800 [virtual-615] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Finished assignment for group at generation 1: {consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1=Assignment(partitions=[t1-0])}
394814:29:56.804 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1 for group g1 for generation 1. The group has 1 members, 0 of which are static.
394914:29:56.812 [virtual-615] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1', protocol='range'}
395014:29:56.812 [virtual-615] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Notifying assignor about the new Assignment(partitions=[t1-0])
395114:29:56.814 [virtual-615] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g1-1, groupId=g1] Adding newly assigned partitions: [t1-0]
395214:29:56.823 [virtual-615] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Found no committed offset for partition t1-0
395314:29:56.835 [virtual-615] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g1-1, groupId=g1] Resetting offset for partition t1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
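The consumer side mirrors this: the ConsumerConfig dump, subscription to t1, group join, and offset reset above are driven by KafkaFlow.subscribe (visible in the ox frames of the stack traces further below). A minimal sketch of that subscription, assuming ox-kafka's ConsumerSettings builder, the Flow take/runForeach operators, and ReceivedMessage's value accessor; group g1 and earliest offsets match the dump above:

  import ox.supervised
  import ox.kafka.{ConsumerSettings, KafkaFlow}
  import ox.kafka.ConsumerSettings.AutoOffsetReset

  supervised {
    val settings = ConsumerSettings
      .default("g1") // group.id = g1; commits stay manual, as in the dump above
      .bootstrapServers("localhost:6001")
      .autoOffsetReset(AutoOffsetReset.Earliest)
    // subscribe yields a flow of received messages; take(1) lets the scope end
    KafkaFlow.subscribe(settings, "t1").take(1).runForeach(m => println(m.value))
  }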
395414:29:57.122 [virtual-608] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
3955 acks = -1
3956 batch.size = 16384
3957 bootstrap.servers = [localhost:6001]
3958 buffer.memory = 33554432
3959 client.dns.lookup = use_all_dns_ips
3960 client.id = producer-4
3961 compression.gzip.level = -1
3962 compression.lz4.level = 9
3963 compression.type = none
3964 compression.zstd.level = 3
3965 connections.max.idle.ms = 540000
3966 delivery.timeout.ms = 120000
3967 enable.idempotence = true
3968 enable.metrics.push = true
3969 interceptor.classes = []
3970 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
3971 linger.ms = 5
3972 max.block.ms = 10000
3973 max.in.flight.requests.per.connection = 5
3974 max.request.size = 1048576
3975 metadata.max.age.ms = 300000
3976 metadata.max.idle.ms = 300000
3977 metadata.recovery.rebootstrap.trigger.ms = 300000
3978 metadata.recovery.strategy = rebootstrap
3979 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3980 metrics.num.samples = 2
3981 metrics.recording.level = INFO
3982 metrics.sample.window.ms = 30000
3983 partitioner.adaptive.partitioning.enable = true
3984 partitioner.availability.timeout.ms = 0
3985 partitioner.class = null
3986 partitioner.ignore.keys = false
3987 receive.buffer.bytes = 32768
3988 reconnect.backoff.max.ms = 1000
3989 reconnect.backoff.ms = 50
3990 request.timeout.ms = 30000
3991 retries = 2147483647
3992 retry.backoff.max.ms = 1000
3993 retry.backoff.ms = 1000
3994 sasl.client.callback.handler.class = null
3995 sasl.jaas.config = null
3996 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3997 sasl.kerberos.min.time.before.relogin = 60000
3998 sasl.kerberos.service.name = null
3999 sasl.kerberos.ticket.renew.jitter = 0.05
4000 sasl.kerberos.ticket.renew.window.factor = 0.8
4001 sasl.login.callback.handler.class = null
4002 sasl.login.class = null
4003 sasl.login.connect.timeout.ms = null
4004 sasl.login.read.timeout.ms = null
4005 sasl.login.refresh.buffer.seconds = 300
4006 sasl.login.refresh.min.period.seconds = 60
4007 sasl.login.refresh.window.factor = 0.8
4008 sasl.login.refresh.window.jitter = 0.05
4009 sasl.login.retry.backoff.max.ms = 10000
4010 sasl.login.retry.backoff.ms = 100
4011 sasl.mechanism = GSSAPI
4012 sasl.oauthbearer.assertion.algorithm = RS256
4013 sasl.oauthbearer.assertion.claim.aud = null
4014 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4015 sasl.oauthbearer.assertion.claim.iss = null
4016 sasl.oauthbearer.assertion.claim.jti.include = false
4017 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4018 sasl.oauthbearer.assertion.claim.sub = null
4019 sasl.oauthbearer.assertion.file = null
4020 sasl.oauthbearer.assertion.private.key.file = null
4021 sasl.oauthbearer.assertion.private.key.passphrase = null
4022 sasl.oauthbearer.assertion.template.file = null
4023 sasl.oauthbearer.client.credentials.client.id = null
4024 sasl.oauthbearer.client.credentials.client.secret = null
4025 sasl.oauthbearer.clock.skew.seconds = 30
4026 sasl.oauthbearer.expected.audience = null
4027 sasl.oauthbearer.expected.issuer = null
4028 sasl.oauthbearer.header.urlencode = false
4029 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4030 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4031 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4032 sasl.oauthbearer.jwks.endpoint.url = null
4033 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4034 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4035 sasl.oauthbearer.scope = null
4036 sasl.oauthbearer.scope.claim.name = scope
4037 sasl.oauthbearer.sub.claim.name = sub
4038 sasl.oauthbearer.token.endpoint.url = null
4039 security.protocol = PLAINTEXT
4040 security.providers = null
4041 send.buffer.bytes = 131072
4042 socket.connection.setup.timeout.max.ms = 30000
4043 socket.connection.setup.timeout.ms = 10000
4044 ssl.cipher.suites = null
4045 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4046 ssl.endpoint.identification.algorithm = https
4047 ssl.engine.factory.class = null
4048 ssl.key.password = null
4049 ssl.keymanager.algorithm = SunX509
4050 ssl.keystore.certificate.chain = null
4051 ssl.keystore.key = null
4052 ssl.keystore.location = null
4053 ssl.keystore.password = null
4054 ssl.keystore.type = JKS
4055 ssl.protocol = TLSv1.3
4056 ssl.provider = null
4057 ssl.secure.random.implementation = null
4058 ssl.trustmanager.algorithm = PKIX
4059 ssl.truststore.certificates = null
4060 ssl.truststore.location = null
4061 ssl.truststore.password = null
4062 ssl.truststore.type = JKS
4063 transaction.timeout.ms = 60000
4064 transaction.two.phase.commit.enable = false
4065 transactional.id = null
4066 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4067
406814:29:57.122 [virtual-608] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
406914:29:57.123 [virtual-608] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-4] Instantiated an idempotent producer.
407014:29:57.125 [virtual-608] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
407114:29:57.125 [virtual-608] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
407214:29:57.125 [virtual-608] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764854997125
407314:29:57.131 [kafka-producer-network-thread | producer-4] INFO o.a.k.c.Metadata - [Producer clientId=producer-4] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
407414:29:57.131 [kafka-producer-network-thread | producer-4] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-4] ProducerId set to 3 with epoch 0
407514:29:57.146 [virtual-608] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-4] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
407614:29:57.150 [virtual-608] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
407714:29:57.151 [virtual-608] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
407814:29:57.151 [virtual-608] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
407914:29:57.151 [virtual-608] INFO o.a.k.c.m.Metrics - Metrics reporters closed
408014:29:57.151 [virtual-608] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-4 unregistered
408114:29:57.155 [virtual-615] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
4082java.lang.InterruptedException: null
4083 ... 18 common frames omitted
4084Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
4085 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
4086 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
4087 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
4088 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
4089 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
4090 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
4091 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
4092 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
4093 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
4094 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
4095 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
4096 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
4097 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
4098 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
4099 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
4100 at scala.Function0.apply$mcV$sp(Function0.scala:45)
4101 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
4102 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
410314:29:57.155 [virtual-614] ERROR o.k.KafkaFlow$ - Exception when polling for records
4104java.lang.InterruptedException: null
4105 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
4106 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
4107 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
4108 at ox.channels.ActorRef.ask(actor.scala:64)
4109 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
4110 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
4111 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
4112 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
4113 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
4114 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
4115 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
4116 at scala.Function0.apply$mcV$sp(Function0.scala:45)
4117 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
4118 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
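The two ERRORs above are expected shutdown noise in this test rather than a failure: when the enclosing supervised scope ends, ox interrupts the forks still running in it, and a fork blocked inside consumer.poll surfaces that as Kafka's InterruptException, logged just before the consumer is closed (see the LeaveGroup lines that follow). The mechanism, reduced to a minimal sketch:

  import ox.{fork, supervised}

  supervised {
    fork {
      // stands in for the blocking consumer.poll in KafkaConsumerWrapper
      try Thread.sleep(60_000)
      catch case e: InterruptedException => println(s"interrupted during scope shutdown: $e")
    }
    () // the body returns immediately; on scope exit the still-running fork
       // is interrupted, which is exactly what produces the ERRORs logged above
  }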
411914:29:57.174 [virtual-621] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g1-1, groupId=g1] Revoke previously assigned partitions [t1-0]
412014:29:57.174 [virtual-621] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Member consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
412114:29:57.175 [virtual-621] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Resetting generation and member id due to: consumer pro-actively leaving the group
412214:29:57.175 [virtual-621] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Request joining group due to: consumer pro-actively leaving the group
412314:29:57.177 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g1] Member consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
412414:29:57.178 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g1-1-a43d3b6b-e9db-45dc-8c9d-29ec135104a1) members.).
412514:29:57.178 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g1 with generation 2 is now empty.
412614:29:57.651 [virtual-621] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
412714:29:57.651 [virtual-621] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
412814:29:57.651 [virtual-621] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
412914:29:57.651 [virtual-621] INFO o.a.k.c.m.Metrics - Metrics reporters closed
413014:29:57.654 [virtual-621] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g1-1 unregistered
413114:29:57.667 [virtual-623] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4132 acks = -1
4133 batch.size = 16384
4134 bootstrap.servers = [localhost:6001]
4135 buffer.memory = 33554432
4136 client.dns.lookup = use_all_dns_ips
4137 client.id = producer-5
4138 compression.gzip.level = -1
4139 compression.lz4.level = 9
4140 compression.type = none
4141 compression.zstd.level = 3
4142 connections.max.idle.ms = 540000
4143 delivery.timeout.ms = 120000
4144 enable.idempotence = true
4145 enable.metrics.push = true
4146 interceptor.classes = []
4147 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4148 linger.ms = 5
4149 max.block.ms = 60000
4150 max.in.flight.requests.per.connection = 5
4151 max.request.size = 1048576
4152 metadata.max.age.ms = 300000
4153 metadata.max.idle.ms = 300000
4154 metadata.recovery.rebootstrap.trigger.ms = 300000
4155 metadata.recovery.strategy = rebootstrap
4156 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4157 metrics.num.samples = 2
4158 metrics.recording.level = INFO
4159 metrics.sample.window.ms = 30000
4160 partitioner.adaptive.partitioning.enable = true
4161 partitioner.availability.timeout.ms = 0
4162 partitioner.class = null
4163 partitioner.ignore.keys = false
4164 receive.buffer.bytes = 32768
4165 reconnect.backoff.max.ms = 1000
4166 reconnect.backoff.ms = 50
4167 request.timeout.ms = 30000
4168 retries = 2147483647
4169 retry.backoff.max.ms = 1000
4170 retry.backoff.ms = 100
4171 sasl.client.callback.handler.class = null
4172 sasl.jaas.config = null
4173 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4174 sasl.kerberos.min.time.before.relogin = 60000
4175 sasl.kerberos.service.name = null
4176 sasl.kerberos.ticket.renew.jitter = 0.05
4177 sasl.kerberos.ticket.renew.window.factor = 0.8
4178 sasl.login.callback.handler.class = null
4179 sasl.login.class = null
4180 sasl.login.connect.timeout.ms = null
4181 sasl.login.read.timeout.ms = null
4182 sasl.login.refresh.buffer.seconds = 300
4183 sasl.login.refresh.min.period.seconds = 60
4184 sasl.login.refresh.window.factor = 0.8
4185 sasl.login.refresh.window.jitter = 0.05
4186 sasl.login.retry.backoff.max.ms = 10000
4187 sasl.login.retry.backoff.ms = 100
4188 sasl.mechanism = GSSAPI
4189 sasl.oauthbearer.assertion.algorithm = RS256
4190 sasl.oauthbearer.assertion.claim.aud = null
4191 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4192 sasl.oauthbearer.assertion.claim.iss = null
4193 sasl.oauthbearer.assertion.claim.jti.include = false
4194 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4195 sasl.oauthbearer.assertion.claim.sub = null
4196 sasl.oauthbearer.assertion.file = null
4197 sasl.oauthbearer.assertion.private.key.file = null
4198 sasl.oauthbearer.assertion.private.key.passphrase = null
4199 sasl.oauthbearer.assertion.template.file = null
4200 sasl.oauthbearer.client.credentials.client.id = null
4201 sasl.oauthbearer.client.credentials.client.secret = null
4202 sasl.oauthbearer.clock.skew.seconds = 30
4203 sasl.oauthbearer.expected.audience = null
4204 sasl.oauthbearer.expected.issuer = null
4205 sasl.oauthbearer.header.urlencode = false
4206 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4207 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4208 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4209 sasl.oauthbearer.jwks.endpoint.url = null
4210 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4211 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4212 sasl.oauthbearer.scope = null
4213 sasl.oauthbearer.scope.claim.name = scope
4214 sasl.oauthbearer.sub.claim.name = sub
4215 sasl.oauthbearer.token.endpoint.url = null
4216 security.protocol = PLAINTEXT
4217 security.providers = null
4218 send.buffer.bytes = 131072
4219 socket.connection.setup.timeout.max.ms = 30000
4220 socket.connection.setup.timeout.ms = 10000
4221 ssl.cipher.suites = null
4222 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4223 ssl.endpoint.identification.algorithm = https
4224 ssl.engine.factory.class = null
4225 ssl.key.password = null
4226 ssl.keymanager.algorithm = SunX509
4227 ssl.keystore.certificate.chain = null
4228 ssl.keystore.key = null
4229 ssl.keystore.location = null
4230 ssl.keystore.password = null
4231 ssl.keystore.type = JKS
4232 ssl.protocol = TLSv1.3
4233 ssl.provider = null
4234 ssl.secure.random.implementation = null
4235 ssl.trustmanager.algorithm = PKIX
4236 ssl.truststore.certificates = null
4237 ssl.truststore.location = null
4238 ssl.truststore.password = null
4239 ssl.truststore.type = JKS
4240 transaction.timeout.ms = 60000
4241 transaction.two.phase.commit.enable = false
4242 transactional.id = null
4243 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4244
424514:29:57.667 [virtual-623] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
424614:29:57.668 [virtual-623] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-5] Instantiated an idempotent producer.
424714:29:57.671 [virtual-623] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
424814:29:57.671 [virtual-623] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
424914:29:57.671 [virtual-623] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764854997671
425014:29:57.677 [kafka-producer-network-thread | producer-5] INFO o.a.k.c.Metadata - [Producer clientId=producer-5] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
425114:29:57.677 [kafka-producer-network-thread | producer-5] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-5] ProducerId set to 4 with epoch 0
425214:29:57.688 [data-plane-kafka-request-handler-2] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t2) to the active controller.
425314:29:57.689 [kafka-producer-network-thread | producer-5] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-5] The metadata response from the cluster reported a recoverable issue with correlation id 5 : {t2=UNKNOWN_TOPIC_OR_PARTITION}
425414:29:57.689 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
425514:29:57.690 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t2 with topic ID F_0MdaWzQLq4SORIf8rMDQ.
425614:29:57.690 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t2-0 with topic ID F_0MdaWzQLq4SORIf8rMDQ and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
425714:29:57.716 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
425814:29:57.716 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t2-0)
425914:29:57.716 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t2-0 with topic id F_0MdaWzQLq4SORIf8rMDQ.
426014:29:57.718 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t2-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
426114:29:57.719 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t2-0 in /tmp/kafka-logs7572554700115704093/t2-0 with properties {}
426214:29:57.719 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t2-0 broker=0] No checkpointed highwatermark is found for partition t2-0
426314:29:57.719 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t2-0 broker=0] Log loaded for partition t2-0 with initial high watermark 0
426414:29:57.719 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t2-0 with topic id Some(F_0MdaWzQLq4SORIf8rMDQ) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
426514:29:57.898 [virtual-627] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-5] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
426614:29:57.901 [virtual-627] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
426714:29:57.901 [virtual-627] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
426814:29:57.901 [virtual-627] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
426914:29:57.902 [virtual-627] INFO o.a.k.c.m.Metrics - Metrics reporters closed
427014:29:57.902 [virtual-627] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-5 unregistered
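The UNKNOWN_TOPIC_OR_PARTITION warning above is likewise benign: the producer's first metadata request for t2 races the broker's auto-creation of the topic, and the producer simply retries once the partition has a leader. A test that wants to avoid the race can create the topic up front with the standard admin client, as in this sketch:

  import java.util.Properties
  import org.apache.kafka.clients.admin.{AdminClient, NewTopic}

  val props = new Properties()
  props.put("bootstrap.servers", "localhost:6001")
  val admin = AdminClient.create(props)
  // one partition, replication factor 1, matching the single embedded broker
  try admin.createTopics(java.util.List.of(new NewTopic("t2", 1, 1.toShort))).all().get()
  finally admin.close()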
427114:29:57.905 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
4272 allow.auto.create.topics = true
4273 auto.commit.interval.ms = 5000
4274 auto.offset.reset = earliest
4275 bootstrap.servers = [localhost:6001]
4276 check.crcs = true
4277 client.dns.lookup = use_all_dns_ips
4278 client.id = consumer-embedded-kafka-spec-2
4279 client.rack =
4280 connections.max.idle.ms = 540000
4281 default.api.timeout.ms = 60000
4282 enable.auto.commit = false
4283 enable.metrics.push = true
4284 exclude.internal.topics = true
4285 fetch.max.bytes = 52428800
4286 fetch.max.wait.ms = 500
4287 fetch.min.bytes = 1
4288 group.id = embedded-kafka-spec
4289 group.instance.id = null
4290 group.protocol = classic
4291 group.remote.assignor = null
4292 heartbeat.interval.ms = 3000
4293 interceptor.classes = []
4294 internal.leave.group.on.close = true
4295 internal.throw.on.fetch.stable.offset.unsupported = false
4296 isolation.level = read_uncommitted
4297 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4298 max.partition.fetch.bytes = 1048576
4299 max.poll.interval.ms = 300000
4300 max.poll.records = 500
4301 metadata.max.age.ms = 300000
4302 metadata.recovery.rebootstrap.trigger.ms = 300000
4303 metadata.recovery.strategy = rebootstrap
4304 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4305 metrics.num.samples = 2
4306 metrics.recording.level = INFO
4307 metrics.sample.window.ms = 30000
4308 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
4309 receive.buffer.bytes = 65536
4310 reconnect.backoff.max.ms = 1000
4311 reconnect.backoff.ms = 50
4312 request.timeout.ms = 30000
4313 retry.backoff.max.ms = 1000
4314 retry.backoff.ms = 100
4315 sasl.client.callback.handler.class = null
4316 sasl.jaas.config = null
4317 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4318 sasl.kerberos.min.time.before.relogin = 60000
4319 sasl.kerberos.service.name = null
4320 sasl.kerberos.ticket.renew.jitter = 0.05
4321 sasl.kerberos.ticket.renew.window.factor = 0.8
4322 sasl.login.callback.handler.class = null
4323 sasl.login.class = null
4324 sasl.login.connect.timeout.ms = null
4325 sasl.login.read.timeout.ms = null
4326 sasl.login.refresh.buffer.seconds = 300
4327 sasl.login.refresh.min.period.seconds = 60
4328 sasl.login.refresh.window.factor = 0.8
4329 sasl.login.refresh.window.jitter = 0.05
4330 sasl.login.retry.backoff.max.ms = 10000
4331 sasl.login.retry.backoff.ms = 100
4332 sasl.mechanism = GSSAPI
4333 sasl.oauthbearer.assertion.algorithm = RS256
4334 sasl.oauthbearer.assertion.claim.aud = null
4335 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4336 sasl.oauthbearer.assertion.claim.iss = null
4337 sasl.oauthbearer.assertion.claim.jti.include = false
4338 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4339 sasl.oauthbearer.assertion.claim.sub = null
4340 sasl.oauthbearer.assertion.file = null
4341 sasl.oauthbearer.assertion.private.key.file = null
4342 sasl.oauthbearer.assertion.private.key.passphrase = null
4343 sasl.oauthbearer.assertion.template.file = null
4344 sasl.oauthbearer.client.credentials.client.id = null
4345 sasl.oauthbearer.client.credentials.client.secret = null
4346 sasl.oauthbearer.clock.skew.seconds = 30
4347 sasl.oauthbearer.expected.audience = null
4348 sasl.oauthbearer.expected.issuer = null
4349 sasl.oauthbearer.header.urlencode = false
4350 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4351 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4352 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4353 sasl.oauthbearer.jwks.endpoint.url = null
4354 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4355 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4356 sasl.oauthbearer.scope = null
4357 sasl.oauthbearer.scope.claim.name = scope
4358 sasl.oauthbearer.sub.claim.name = sub
4359 sasl.oauthbearer.token.endpoint.url = null
4360 security.protocol = PLAINTEXT
4361 security.providers = null
4362 send.buffer.bytes = 131072
4363 session.timeout.ms = 45000
4364 share.acknowledgement.mode = implicit
4365 socket.connection.setup.timeout.max.ms = 30000
4366 socket.connection.setup.timeout.ms = 10000
4367 ssl.cipher.suites = null
4368 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4369 ssl.endpoint.identification.algorithm = https
4370 ssl.engine.factory.class = null
4371 ssl.key.password = null
4372 ssl.keymanager.algorithm = SunX509
4373 ssl.keystore.certificate.chain = null
4374 ssl.keystore.key = null
4375 ssl.keystore.location = null
4376 ssl.keystore.password = null
4377 ssl.keystore.type = JKS
4378 ssl.protocol = TLSv1.3
4379 ssl.provider = null
4380 ssl.secure.random.implementation = null
4381 ssl.trustmanager.algorithm = PKIX
4382 ssl.truststore.certificates = null
4383 ssl.truststore.location = null
4384 ssl.truststore.password = null
4385 ssl.truststore.type = JKS
4386 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4387
438814:29:57.906 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
438914:29:57.910 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
439014:29:57.910 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
439114:29:57.911 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764854997910
439214:29:57.913 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Subscribed to topic(s): t2
439314:29:57.918 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
439414:29:57.922 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
439514:29:57.922 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] (Re-)joining group
439614:29:57.924 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group embedded-kafka-spec in Empty state. Created a new member id consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca and requesting the member to rejoin with this id.
439714:29:57.925 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Request joining group due to: need to re-join with the given member-id: consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca
439814:29:57.925 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] (Re-)joining group
439914:29:57.926 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca joins group embedded-kafka-spec in Empty state. Adding to the group now.
440014:29:57.926 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca with group instance id null; client reason: need to re-join with the given member-id: consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca).
440114:30:00.926 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group embedded-kafka-spec generation 1 with 1 members.
440214:30:00.927 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Successfully joined group with generation Generation{generationId=1, memberId='consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca', protocol='range'}
440314:30:00.928 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Finished assignment for group at generation 1: {consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca=Assignment(partitions=[t2-0])}
440414:30:00.928 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca for group embedded-kafka-spec for generation 1. The group has 1 members, 0 of which are static.
440514:30:00.935 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Successfully synced group in generation Generation{generationId=1, memberId='consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca', protocol='range'}
440614:30:00.936 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Notifying assignor about the new Assignment(partitions=[t2-0])
440714:30:00.936 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Adding newly assigned partitions: [t2-0]
440814:30:00.937 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Found no committed offset for partition t2-0
440914:30:00.939 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Resetting offset for partition t2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
441014:30:07.192 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Revoke previously assigned partitions [t2-0]
441114:30:07.192 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Member consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
441214:30:07.193 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Resetting generation and member id due to: consumer pro-actively leaving the group
441314:30:07.193 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Request joining group due to: consumer pro-actively leaving the group
441414:30:07.193 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group embedded-kafka-spec] Member consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
441514:30:07.193 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-embedded-kafka-spec-2-c6439380-920c-40ba-9a27-026ea53a74ca) members.).
441614:30:07.193 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group embedded-kafka-spec with generation 2 is now empty.
441714:30:07.201 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
441814:30:07.201 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
441914:30:07.201 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
442014:30:07.201 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
442114:30:07.204 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-embedded-kafka-spec-2 unregistered
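The consumer-embedded-kafka-spec-2 client above is not ox code: it is the short-lived consumer that the embedded-kafka test library starts to assert on published messages, which is why it joins and immediately leaves group embedded-kafka-spec. In the test source that typically reads as below (a sketch assuming embedded-kafka's io.github.embeddedkafka helper API; its default kafkaPort is 6001, matching the bootstrap address in these logs):

  import io.github.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}

  implicit val kafkaConfig: EmbeddedKafkaConfig = EmbeddedKafkaConfig(kafkaPort = 6001)
  // starts a throwaway consumer (a group like embedded-kafka-spec above), reads
  // the first string message from t2, then closes it again, as logged above
  val msg: String = EmbeddedKafka.consumeFirstStringMessageFrom("t2")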
442214:30:07.207 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4423 acks = -1
4424 batch.size = 16384
4425 bootstrap.servers = [localhost:6001]
4426 buffer.memory = 33554432
4427 client.dns.lookup = use_all_dns_ips
4428 client.id = producer-6
4429 compression.gzip.level = -1
4430 compression.lz4.level = 9
4431 compression.type = none
4432 compression.zstd.level = 3
4433 connections.max.idle.ms = 540000
4434 delivery.timeout.ms = 120000
4435 enable.idempotence = true
4436 enable.metrics.push = true
4437 interceptor.classes = []
4438 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4439 linger.ms = 5
4440 max.block.ms = 10000
4441 max.in.flight.requests.per.connection = 5
4442 max.request.size = 1048576
4443 metadata.max.age.ms = 300000
4444 metadata.max.idle.ms = 300000
4445 metadata.recovery.rebootstrap.trigger.ms = 300000
4446 metadata.recovery.strategy = rebootstrap
4447 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4448 metrics.num.samples = 2
4449 metrics.recording.level = INFO
4450 metrics.sample.window.ms = 30000
4451 partitioner.adaptive.partitioning.enable = true
4452 partitioner.availability.timeout.ms = 0
4453 partitioner.class = null
4454 partitioner.ignore.keys = false
4455 receive.buffer.bytes = 32768
4456 reconnect.backoff.max.ms = 1000
4457 reconnect.backoff.ms = 50
4458 request.timeout.ms = 30000
4459 retries = 2147483647
4460 retry.backoff.max.ms = 1000
4461 retry.backoff.ms = 1000
4462 sasl.client.callback.handler.class = null
4463 sasl.jaas.config = null
4464 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4465 sasl.kerberos.min.time.before.relogin = 60000
4466 sasl.kerberos.service.name = null
4467 sasl.kerberos.ticket.renew.jitter = 0.05
4468 sasl.kerberos.ticket.renew.window.factor = 0.8
4469 sasl.login.callback.handler.class = null
4470 sasl.login.class = null
4471 sasl.login.connect.timeout.ms = null
4472 sasl.login.read.timeout.ms = null
4473 sasl.login.refresh.buffer.seconds = 300
4474 sasl.login.refresh.min.period.seconds = 60
4475 sasl.login.refresh.window.factor = 0.8
4476 sasl.login.refresh.window.jitter = 0.05
4477 sasl.login.retry.backoff.max.ms = 10000
4478 sasl.login.retry.backoff.ms = 100
4479 sasl.mechanism = GSSAPI
4480 sasl.oauthbearer.assertion.algorithm = RS256
4481 sasl.oauthbearer.assertion.claim.aud = null
4482 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4483 sasl.oauthbearer.assertion.claim.iss = null
4484 sasl.oauthbearer.assertion.claim.jti.include = false
4485 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4486 sasl.oauthbearer.assertion.claim.sub = null
4487 sasl.oauthbearer.assertion.file = null
4488 sasl.oauthbearer.assertion.private.key.file = null
4489 sasl.oauthbearer.assertion.private.key.passphrase = null
4490 sasl.oauthbearer.assertion.template.file = null
4491 sasl.oauthbearer.client.credentials.client.id = null
4492 sasl.oauthbearer.client.credentials.client.secret = null
4493 sasl.oauthbearer.clock.skew.seconds = 30
4494 sasl.oauthbearer.expected.audience = null
4495 sasl.oauthbearer.expected.issuer = null
4496 sasl.oauthbearer.header.urlencode = false
4497 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4498 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4499 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4500 sasl.oauthbearer.jwks.endpoint.url = null
4501 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4502 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4503 sasl.oauthbearer.scope = null
4504 sasl.oauthbearer.scope.claim.name = scope
4505 sasl.oauthbearer.sub.claim.name = sub
4506 sasl.oauthbearer.token.endpoint.url = null
4507 security.protocol = PLAINTEXT
4508 security.providers = null
4509 send.buffer.bytes = 131072
4510 socket.connection.setup.timeout.max.ms = 30000
4511 socket.connection.setup.timeout.ms = 10000
4512 ssl.cipher.suites = null
4513 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4514 ssl.endpoint.identification.algorithm = https
4515 ssl.engine.factory.class = null
4516 ssl.key.password = null
4517 ssl.keymanager.algorithm = SunX509
4518 ssl.keystore.certificate.chain = null
4519 ssl.keystore.key = null
4520 ssl.keystore.location = null
4521 ssl.keystore.password = null
4522 ssl.keystore.type = JKS
4523 ssl.protocol = TLSv1.3
4524 ssl.provider = null
4525 ssl.secure.random.implementation = null
4526 ssl.trustmanager.algorithm = PKIX
4527 ssl.truststore.certificates = null
4528 ssl.truststore.location = null
4529 ssl.truststore.password = null
4530 ssl.truststore.type = JKS
4531 transaction.timeout.ms = 60000
4532 transaction.two.phase.commit.enable = false
4533 transactional.id = null
4534 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4535
14:30:07.207 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:07.208 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-6] Instantiated an idempotent producer.
14:30:07.210 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:07.210 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:07.210 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855007210
14:30:07.214 [data-plane-kafka-request-handler-6] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t3_1) to the active controller.
14:30:07.215 [kafka-producer-network-thread | producer-6] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-6] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t3_1=UNKNOWN_TOPIC_OR_PARTITION}
14:30:07.215 [kafka-producer-network-thread | producer-6] INFO o.a.k.c.Metadata - [Producer clientId=producer-6] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:07.215 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t3_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
14:30:07.216 [kafka-producer-network-thread | producer-6] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-6] ProducerId set to 5 with epoch 0
14:30:07.216 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t3_1 with topic ID UYiuJ6tVQKCX7_F0bGR-Nw.
14:30:07.216 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t3_1-0 with topic ID UYiuJ6tVQKCX7_F0bGR-Nw and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
14:30:07.242 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
14:30:07.242 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t3_1-0)
14:30:07.242 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t3_1-0 with topic id UYiuJ6tVQKCX7_F0bGR-Nw.
14:30:07.245 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t3_1-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
14:30:07.246 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t3_1-0 in /tmp/kafka-logs7572554700115704093/t3_1-0 with properties {}
14:30:07.246 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_1-0 broker=0] No checkpointed highwatermark is found for partition t3_1-0
14:30:07.246 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_1-0 broker=0] Log loaded for partition t3_1-0 with initial high watermark 0
14:30:07.246 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t3_1-0 with topic id Some(UYiuJ6tVQKCX7_F0bGR-Nw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
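----
The WARN above (UNKNOWN_TOPIC_OR_PARTITION on the first metadata response) is the expected round-trip when the broker auto-creates a topic on first use: the producer asks for metadata, the broker forwards a creation request to the controller, and the next metadata refresh succeeds. A test that does not want to rely on auto-creation can create the topic up front; a minimal sketch (connection details taken from the dump above, everything else illustrative):

import java.util.Properties
import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig, NewTopic}

val props = new Properties()
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
val admin = AdminClient.create(props)
// one partition, replication factor 1, matching the CreatableTopic logged above
admin.createTopics(java.util.List.of(new NewTopic("t3_1", 1, 1.toShort))).all().get()
admin.close()
----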
14:30:08.227 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-6] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:08.229 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:08.229 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:08.229 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:08.229 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:08.229 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-6 unregistered
14:30:08.230 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-7
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

14:30:08.230 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:08.230 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-7] Instantiated an idempotent producer.
14:30:08.233 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:08.236 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:08.236 [kafka-producer-network-thread | producer-7] INFO o.a.k.c.Metadata - [Producer clientId=producer-7] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:08.236 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855008233
14:30:08.236 [kafka-producer-network-thread | producer-7] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-7] ProducerId set to 6 with epoch 0
14:30:08.246 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-7] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:08.247 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:08.247 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:08.248 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:08.248 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:08.248 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-7 unregistered
14:30:08.249 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-8
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

14:30:08.249 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:08.249 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-8] Instantiated an idempotent producer.
14:30:08.251 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:08.252 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:08.253 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855008251
14:30:08.256 [kafka-producer-network-thread | producer-8] INFO o.a.k.c.Metadata - [Producer clientId=producer-8] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:08.256 [kafka-producer-network-thread | producer-8] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-8] ProducerId set to 7 with epoch 0
14:30:08.264 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-8] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:08.266 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:08.266 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:08.267 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:08.267 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:08.267 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-8 unregistered
14:30:08.268 [virtual-632] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g3_1-3
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g3_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

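----
A consumer with the settings dumped above (group g3_1, earliest offset reset, manual commits) can be built with the plain Kafka client API along these lines; again a minimal sketch, not code from this build:

import java.time.Duration
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer
import scala.jdk.CollectionConverters.*

val props = new Properties()
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g3_1")
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") // offsets are committed manually, as in the dump
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)

val consumer = new KafkaConsumer[String, String](props)
consumer.subscribe(java.util.List.of("t3_2")) // the topic this consumer subscribes to below
consumer.poll(Duration.ofMillis(500)).asScala.foreach(r => println(s"${r.key} -> ${r.value}"))
consumer.close()
----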
14:30:08.268 [virtual-632] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:08.269 [virtual-634] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g3_1-4
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g3_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

14:30:08.270 [virtual-634] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:08.273 [virtual-632] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:08.274 [virtual-632] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:08.274 [virtual-632] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855008273
14:30:08.274 [virtual-637] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Subscribed to topic(s): t3_2
14:30:08.276 [virtual-634] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:08.277 [virtual-634] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:08.277 [virtual-634] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855008276
14:30:08.278 [data-plane-kafka-request-handler-7] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t3_2) to the active controller.
14:30:08.279 [virtual-637] WARN o.a.k.c.NetworkClient - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] The metadata response from the cluster reported a recoverable issue with correlation id 2 : {t3_2=UNKNOWN_TOPIC_OR_PARTITION}
14:30:08.279 [virtual-637] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:08.280 [virtual-634] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-9
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 60000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

14:30:08.280 [virtual-634] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:08.280 [virtual-637] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:30:08.283 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t3_2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
14:30:08.283 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t3_2 with topic ID geyVmBQsQbSSwStyDs8fMw.
14:30:08.283 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t3_2-0 with topic ID geyVmBQsQbSSwStyDs8fMw and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
14:30:08.282 [virtual-634] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-9] Instantiated an idempotent producer.
14:30:08.287 [virtual-634] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:08.287 [virtual-634] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:08.288 [virtual-637] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] (Re-)joining group
14:30:08.289 [kafka-producer-network-thread | producer-9] INFO o.a.k.c.Metadata - [Producer clientId=producer-9] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:08.289 [virtual-634] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855008287
14:30:08.290 [kafka-producer-network-thread | producer-9] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-9] ProducerId set to 8 with epoch 0
14:30:08.291 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in Empty state. Created a new member id consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78 and requesting the member to rejoin with this id.
14:30:08.292 [virtual-638] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Subscribed to topic(s): t3_1
14:30:08.294 [virtual-637] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78
14:30:08.294 [virtual-637] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] (Re-)joining group
14:30:08.294 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78 joins group g3_1 in Empty state. Adding to the group now.
14:30:08.295 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78 with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78).
14:30:08.298 [virtual-638] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:08.299 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:30:08.300 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] (Re-)joining group
14:30:08.302 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in PreparingRebalance state. Created a new member id consumer-g3_1-4-d7784c2c-df43-4de5-b076-8072028ac14c and requesting the member to rejoin with this id.
14:30:08.302 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-4-d7784c2c-df43-4de5-b076-8072028ac14c
14:30:08.302 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] (Re-)joining group
14:30:08.303 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-4-d7784c2c-df43-4de5-b076-8072028ac14c joins group g3_1 in PreparingRebalance state. Adding to the group now.
14:30:08.309 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
14:30:08.309 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t3_2-0)
14:30:08.309 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t3_2-0 with topic id geyVmBQsQbSSwStyDs8fMw.
14:30:08.312 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t3_2-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
14:30:08.312 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t3_2-0 in /tmp/kafka-logs7572554700115704093/t3_2-0 with properties {}
14:30:08.312 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_2-0 broker=0] No checkpointed highwatermark is found for partition t3_2-0
14:30:08.313 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_2-0 broker=0] Log loaded for partition t3_2-0 with initial high watermark 0
14:30:08.313 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t3_2-0 with topic id Some(geyVmBQsQbSSwStyDs8fMw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
14:30:14.295 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_1 generation 1 with 2 members.
14:30:14.295 [virtual-637] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78', protocol='range'}
14:30:14.295 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_1-4-d7784c2c-df43-4de5-b076-8072028ac14c', protocol='range'}
14:30:14.297 [virtual-637] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Finished assignment for group at generation 1: {consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78=Assignment(partitions=[t3_2-0]), consumer-g3_1-4-d7784c2c-df43-4de5-b076-8072028ac14c=Assignment(partitions=[t3_1-0])}
14:30:14.298 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78 for group g3_1 for generation 1. The group has 2 members, 0 of which are static.
14:30:14.304 [virtual-637] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78', protocol='range'}
14:30:14.304 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_1-4-d7784c2c-df43-4de5-b076-8072028ac14c', protocol='range'}
14:30:14.304 [virtual-637] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_2-0])
14:30:14.304 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_1-0])
14:30:14.304 [virtual-637] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Adding newly assigned partitions: [t3_2-0]
14:30:14.304 [virtual-638] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Adding newly assigned partitions: [t3_1-0]
14:30:14.305 [virtual-637] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Found no committed offset for partition t3_2-0
14:30:14.305 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Found no committed offset for partition t3_1-0
14:30:14.306 [virtual-637] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Resetting offset for partition t3_2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
14:30:14.308 [virtual-638] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Resetting offset for partition t3_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
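----
The sequence above is one full round of the classic group protocol: both members join, the coordinator stabilizes generation 1, the leader (consumer-g3_1-3) computes a range assignment (one partition each: t3_2-0 and t3_1-0), the members sync, and offsets are reset to 0 because nothing has been committed yet. A client can observe these transitions with a rebalance listener; a minimal sketch, reusing the consumer from the previous sketch:

import java.util
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
import org.apache.kafka.common.TopicPartition

consumer.subscribe(
  java.util.List.of("t3_2"),
  new ConsumerRebalanceListener {
    override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit =
      println(s"assigned: $partitions") // e.g. [t3_2-0] once the group stabilizes
    override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit =
      println(s"revoked: $partitions") // e.g. [t3_2-0] when the consumer closes
  }
)
----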
14:30:16.321 [virtual-636] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:30:16.321 [virtual-637] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:30:16.321 [virtual-638] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:30:16.321 [virtual-642] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
    Suppressed: [CIRCULAR REFERENCE: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException]
    Wrapped by: java.util.concurrent.ExecutionException: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
    at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
    at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
    at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
    at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
    ... 3 common frames omitted
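----
The three ERROR entries above are teardown noise rather than test failures: when the enclosing ox scope ends, its forks are interrupted, and a KafkaConsumer blocked in poll surfaces the interrupt as InterruptException wrapping InterruptedException, which the wrapper logs while the scope shuts down. A minimal sketch of the mechanism, assuming ox's documented supervised/fork semantics (Thread.sleep stands in for the blocking poll):

import ox.*

supervised {
  fork {
    try Thread.sleep(10_000) // stands in for KafkaConsumer.poll
    catch
      case e: InterruptedException =>
        println("poll interrupted") // logged, as in the entries above
        throw e // propagate, so the fork completes as interrupted
  }
  Thread.sleep(100) // let the fork start blocking
} // scope body ends here: remaining daemon forks are interrupted
----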
532714:30:16.322 [virtual-648] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-9] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
532814:30:16.322 [virtual-647] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Revoke previously assigned partitions [t3_2-0]
532914:30:16.323 [virtual-647] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Member consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
533014:30:16.323 [virtual-647] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
533114:30:16.324 [virtual-647] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
533214:30:16.324 [virtual-649] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Revoke previously assigned partitions [t3_1-0]
533314:30:16.325 [virtual-649] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Member consumer-g3_1-4-d7784c2c-df43-4de5-b076-8072028ac14c sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
533414:30:16.325 [virtual-649] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
533514:30:16.325 [virtual-649] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
533614:30:16.325 [virtual-648] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
533714:30:16.325 [virtual-648] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
533814:30:16.325 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
533914:30:16.326 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g3_1-3-7c127b5e-f0dc-403c-ba3e-b44d5522cf78) members.).
534014:30:16.326 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-4-d7784c2c-df43-4de5-b076-8072028ac14c has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
534114:30:16.326 [virtual-648] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
534214:30:16.327 [virtual-648] INFO o.a.k.c.m.Metrics - Metrics reporters closed
534314:30:16.327 [virtual-648] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-9 unregistered
534414:30:16.327 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_1 with generation 2 is now empty.
534514:30:16.328 [virtual-647] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
534614:30:16.328 [virtual-647] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
534714:30:16.329 [virtual-647] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
534814:30:16.329 [virtual-647] INFO o.a.k.c.m.Metrics - Metrics reporters closed
534914:30:16.331 [virtual-647] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-3 unregistered
535014:30:16.818 [virtual-649] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
535114:30:16.819 [virtual-649] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
535214:30:16.819 [virtual-649] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
535314:30:16.819 [virtual-649] INFO o.a.k.c.m.Metrics - Metrics reporters closed
535414:30:16.821 [virtual-649] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-4 unregistered
535514:30:16.821 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
5356 acks = -1
5357 batch.size = 16384
5358 bootstrap.servers = [localhost:6001]
5359 buffer.memory = 33554432
5360 client.dns.lookup = use_all_dns_ips
5361 client.id = producer-10
5362 compression.gzip.level = -1
5363 compression.lz4.level = 9
5364 compression.type = none
5365 compression.zstd.level = 3
5366 connections.max.idle.ms = 540000
5367 delivery.timeout.ms = 120000
5368 enable.idempotence = true
5369 enable.metrics.push = true
5370 interceptor.classes = []
5371 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
5372 linger.ms = 5
5373 max.block.ms = 10000
5374 max.in.flight.requests.per.connection = 5
5375 max.request.size = 1048576
5376 metadata.max.age.ms = 300000
5377 metadata.max.idle.ms = 300000
5378 metadata.recovery.rebootstrap.trigger.ms = 300000
5379 metadata.recovery.strategy = rebootstrap
5380 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
5381 metrics.num.samples = 2
5382 metrics.recording.level = INFO
5383 metrics.sample.window.ms = 30000
5384 partitioner.adaptive.partitioning.enable = true
5385 partitioner.availability.timeout.ms = 0
5386 partitioner.class = null
5387 partitioner.ignore.keys = false
5388 receive.buffer.bytes = 32768
5389 reconnect.backoff.max.ms = 1000
5390 reconnect.backoff.ms = 50
5391 request.timeout.ms = 30000
5392 retries = 2147483647
5393 retry.backoff.max.ms = 1000
5394 retry.backoff.ms = 1000
5395 sasl.client.callback.handler.class = null
5396 sasl.jaas.config = null
5397 sasl.kerberos.kinit.cmd = /usr/bin/kinit
5398 sasl.kerberos.min.time.before.relogin = 60000
5399 sasl.kerberos.service.name = null
5400 sasl.kerberos.ticket.renew.jitter = 0.05
5401 sasl.kerberos.ticket.renew.window.factor = 0.8
5402 sasl.login.callback.handler.class = null
5403 sasl.login.class = null
5404 sasl.login.connect.timeout.ms = null
5405 sasl.login.read.timeout.ms = null
5406 sasl.login.refresh.buffer.seconds = 300
5407 sasl.login.refresh.min.period.seconds = 60
5408 sasl.login.refresh.window.factor = 0.8
5409 sasl.login.refresh.window.jitter = 0.05
5410 sasl.login.retry.backoff.max.ms = 10000
5411 sasl.login.retry.backoff.ms = 100
5412 sasl.mechanism = GSSAPI
5413 sasl.oauthbearer.assertion.algorithm = RS256
5414 sasl.oauthbearer.assertion.claim.aud = null
5415 sasl.oauthbearer.assertion.claim.exp.seconds = 300
5416 sasl.oauthbearer.assertion.claim.iss = null
5417 sasl.oauthbearer.assertion.claim.jti.include = false
5418 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5419 sasl.oauthbearer.assertion.claim.sub = null
5420 sasl.oauthbearer.assertion.file = null
5421 sasl.oauthbearer.assertion.private.key.file = null
5422 sasl.oauthbearer.assertion.private.key.passphrase = null
5423 sasl.oauthbearer.assertion.template.file = null
5424 sasl.oauthbearer.client.credentials.client.id = null
5425 sasl.oauthbearer.client.credentials.client.secret = null
5426 sasl.oauthbearer.clock.skew.seconds = 30
5427 sasl.oauthbearer.expected.audience = null
5428 sasl.oauthbearer.expected.issuer = null
5429 sasl.oauthbearer.header.urlencode = false
5430 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5431 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5432 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5433 sasl.oauthbearer.jwks.endpoint.url = null
5434 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5435 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5436 sasl.oauthbearer.scope = null
5437 sasl.oauthbearer.scope.claim.name = scope
5438 sasl.oauthbearer.sub.claim.name = sub
5439 sasl.oauthbearer.token.endpoint.url = null
5440 security.protocol = PLAINTEXT
5441 security.providers = null
5442 send.buffer.bytes = 131072
5443 socket.connection.setup.timeout.max.ms = 30000
5444 socket.connection.setup.timeout.ms = 10000
5445 ssl.cipher.suites = null
5446 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5447 ssl.endpoint.identification.algorithm = https
5448 ssl.engine.factory.class = null
5449 ssl.key.password = null
5450 ssl.keymanager.algorithm = SunX509
5451 ssl.keystore.certificate.chain = null
5452 ssl.keystore.key = null
5453 ssl.keystore.location = null
5454 ssl.keystore.password = null
5455 ssl.keystore.type = JKS
5456 ssl.protocol = TLSv1.3
5457 ssl.provider = null
5458 ssl.secure.random.implementation = null
5459 ssl.trustmanager.algorithm = PKIX
5460 ssl.truststore.certificates = null
5461 ssl.truststore.location = null
5462 ssl.truststore.password = null
5463 ssl.truststore.type = JKS
5464 transaction.timeout.ms = 60000
5465 transaction.two.phase.commit.enable = false
5466 transactional.id = null
5467 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
5468
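
The ProducerConfig dump above is the standard Kafka Java client configuration, here pointed at the embedded broker on localhost:6001 with string serializers, idempotence enabled, and max.block.ms lowered to 10000. A minimal sketch of building an equivalent producer from Scala, assuming only the public org.apache.kafka.clients API (the props/producer names and the sample record are illustrative, not taken from the test):

    import java.util.Properties
    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
    import org.apache.kafka.common.serialization.StringSerializer

    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000") // max.block.ms, as logged above
    val producer = new KafkaProducer[String, String](props)
    try producer.send(new ProducerRecord("t3_1", "key", "value")).get() // block until acked
    finally producer.close() // emits the "Closing the Kafka producer" lines seen below
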
546914:30:16.822 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
547014:30:16.822 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-10] Instantiated an idempotent producer.
547114:30:16.824 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
547214:30:16.824 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
547314:30:16.824 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855016824
547414:30:16.827 [kafka-producer-network-thread | producer-10] INFO o.a.k.c.Metadata - [Producer clientId=producer-10] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
547514:30:16.827 [kafka-producer-network-thread | producer-10] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-10] ProducerId set to 9 with epoch 0
547614:30:16.835 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-10] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
547714:30:16.838 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
547814:30:16.838 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
547914:30:16.838 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
548014:30:16.838 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
548114:30:16.838 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-10 unregistered
548214:30:16.840 [virtual-652] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
5483 allow.auto.create.topics = true
5484 auto.commit.interval.ms = 5000
5485 auto.offset.reset = earliest
5486 bootstrap.servers = [localhost:6001]
5487 check.crcs = true
5488 client.dns.lookup = use_all_dns_ips
5489 client.id = consumer-g3_1-5
5490 client.rack =
5491 connections.max.idle.ms = 540000
5492 default.api.timeout.ms = 60000
5493 enable.auto.commit = false
5494 enable.metrics.push = true
5495 exclude.internal.topics = true
5496 fetch.max.bytes = 52428800
5497 fetch.max.wait.ms = 500
5498 fetch.min.bytes = 1
5499 group.id = g3_1
5500 group.instance.id = null
5501 group.protocol = classic
5502 group.remote.assignor = null
5503 heartbeat.interval.ms = 3000
5504 interceptor.classes = []
5505 internal.leave.group.on.close = true
5506 internal.throw.on.fetch.stable.offset.unsupported = false
5507 isolation.level = read_uncommitted
5508 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
5509 max.partition.fetch.bytes = 1048576
5510 max.poll.interval.ms = 300000
5511 max.poll.records = 500
5512 metadata.max.age.ms = 300000
5513 metadata.recovery.rebootstrap.trigger.ms = 300000
5514 metadata.recovery.strategy = rebootstrap
5515 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
5516 metrics.num.samples = 2
5517 metrics.recording.level = INFO
5518 metrics.sample.window.ms = 30000
5519 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
5520 receive.buffer.bytes = 65536
5521 reconnect.backoff.max.ms = 1000
5522 reconnect.backoff.ms = 50
5523 request.timeout.ms = 30000
5524 retry.backoff.max.ms = 1000
5525 retry.backoff.ms = 100
5526 sasl.client.callback.handler.class = null
5527 sasl.jaas.config = null
5528 sasl.kerberos.kinit.cmd = /usr/bin/kinit
5529 sasl.kerberos.min.time.before.relogin = 60000
5530 sasl.kerberos.service.name = null
5531 sasl.kerberos.ticket.renew.jitter = 0.05
5532 sasl.kerberos.ticket.renew.window.factor = 0.8
5533 sasl.login.callback.handler.class = null
5534 sasl.login.class = null
5535 sasl.login.connect.timeout.ms = null
5536 sasl.login.read.timeout.ms = null
5537 sasl.login.refresh.buffer.seconds = 300
5538 sasl.login.refresh.min.period.seconds = 60
5539 sasl.login.refresh.window.factor = 0.8
5540 sasl.login.refresh.window.jitter = 0.05
5541 sasl.login.retry.backoff.max.ms = 10000
5542 sasl.login.retry.backoff.ms = 100
5543 sasl.mechanism = GSSAPI
5544 sasl.oauthbearer.assertion.algorithm = RS256
5545 sasl.oauthbearer.assertion.claim.aud = null
5546 sasl.oauthbearer.assertion.claim.exp.seconds = 300
5547 sasl.oauthbearer.assertion.claim.iss = null
5548 sasl.oauthbearer.assertion.claim.jti.include = false
5549 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5550 sasl.oauthbearer.assertion.claim.sub = null
5551 sasl.oauthbearer.assertion.file = null
5552 sasl.oauthbearer.assertion.private.key.file = null
5553 sasl.oauthbearer.assertion.private.key.passphrase = null
5554 sasl.oauthbearer.assertion.template.file = null
5555 sasl.oauthbearer.client.credentials.client.id = null
5556 sasl.oauthbearer.client.credentials.client.secret = null
5557 sasl.oauthbearer.clock.skew.seconds = 30
5558 sasl.oauthbearer.expected.audience = null
5559 sasl.oauthbearer.expected.issuer = null
5560 sasl.oauthbearer.header.urlencode = false
5561 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5562 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5563 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5564 sasl.oauthbearer.jwks.endpoint.url = null
5565 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5566 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5567 sasl.oauthbearer.scope = null
5568 sasl.oauthbearer.scope.claim.name = scope
5569 sasl.oauthbearer.sub.claim.name = sub
5570 sasl.oauthbearer.token.endpoint.url = null
5571 security.protocol = PLAINTEXT
5572 security.providers = null
5573 send.buffer.bytes = 131072
5574 session.timeout.ms = 45000
5575 share.acknowledgement.mode = implicit
5576 socket.connection.setup.timeout.max.ms = 30000
5577 socket.connection.setup.timeout.ms = 10000
5578 ssl.cipher.suites = null
5579 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5580 ssl.endpoint.identification.algorithm = https
5581 ssl.engine.factory.class = null
5582 ssl.key.password = null
5583 ssl.keymanager.algorithm = SunX509
5584 ssl.keystore.certificate.chain = null
5585 ssl.keystore.key = null
5586 ssl.keystore.location = null
5587 ssl.keystore.password = null
5588 ssl.keystore.type = JKS
5589 ssl.protocol = TLSv1.3
5590 ssl.provider = null
5591 ssl.secure.random.implementation = null
5592 ssl.trustmanager.algorithm = PKIX
5593 ssl.truststore.certificates = null
5594 ssl.truststore.location = null
5595 ssl.truststore.password = null
5596 ssl.truststore.type = JKS
5597 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
5598
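
The ConsumerConfig dump is likewise the plain Java client configuration: group g3_1, auto.offset.reset=earliest, manual commits (enable.auto.commit=false), and string deserializers. A minimal equivalent from Scala, again assuming only the public client API (names illustrative); the first poll is what drives the "Discovered group coordinator" / "(Re-)joining group" sequence that follows:

    import java.time.Duration
    import java.util.Properties
    import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
    import org.apache.kafka.common.serialization.StringDeserializer

    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "g3_1")
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(java.util.List.of("t3_1"))
    val records = consumer.poll(Duration.ofMillis(500)) // triggers coordinator discovery and the group join
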
559914:30:16.840 [virtual-652] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
560014:30:16.842 [virtual-652] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
560114:30:16.843 [virtual-652] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
560214:30:16.843 [virtual-652] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855016842
560314:30:16.844 [virtual-655] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Subscribed to topic(s): t3_1
560414:30:16.847 [virtual-655] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
560514:30:16.848 [virtual-655] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
560614:30:16.849 [virtual-655] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] (Re-)joining group
560714:30:16.851 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in Empty state. Created a new member id consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4 and requesting the member to rejoin with this id.
560814:30:16.851 [virtual-655] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4
560914:30:16.851 [virtual-655] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] (Re-)joining group
561014:30:16.852 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4 joins group g3_1 in Empty state. Adding to the group now.
561114:30:16.852 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4 with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4).
561214:30:19.853 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_1 generation 3 with 1 members.
561314:30:19.854 [virtual-655] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4', protocol='range'}
561414:30:19.854 [virtual-655] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Finished assignment for group at generation 3: {consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4=Assignment(partitions=[t3_1-0])}
561514:30:19.855 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4 for group g3_1 for generation 3. The group has 1 members, 0 of which are static.
561614:30:19.861 [virtual-655] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4', protocol='range'}
561714:30:19.861 [virtual-655] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_1-0])
561814:30:19.861 [virtual-655] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Adding newly assigned partitions: [t3_1-0]
561914:30:19.862 [virtual-655] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t3_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
562014:30:19.866 [virtual-652] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
5621 allow.auto.create.topics = true
5622 auto.commit.interval.ms = 5000
5623 auto.offset.reset = earliest
5624 bootstrap.servers = [localhost:6001]
5625 check.crcs = true
5626 client.dns.lookup = use_all_dns_ips
5627 client.id = consumer-g3_2-6
5628 client.rack =
5629 connections.max.idle.ms = 540000
5630 default.api.timeout.ms = 60000
5631 enable.auto.commit = false
5632 enable.metrics.push = true
5633 exclude.internal.topics = true
5634 fetch.max.bytes = 52428800
5635 fetch.max.wait.ms = 500
5636 fetch.min.bytes = 1
5637 group.id = g3_2
5638 group.instance.id = null
5639 group.protocol = classic
5640 group.remote.assignor = null
5641 heartbeat.interval.ms = 3000
5642 interceptor.classes = []
5643 internal.leave.group.on.close = true
5644 internal.throw.on.fetch.stable.offset.unsupported = false
5645 isolation.level = read_uncommitted
5646 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
5647 max.partition.fetch.bytes = 1048576
5648 max.poll.interval.ms = 300000
5649 max.poll.records = 500
5650 metadata.max.age.ms = 300000
5651 metadata.recovery.rebootstrap.trigger.ms = 300000
5652 metadata.recovery.strategy = rebootstrap
5653 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
5654 metrics.num.samples = 2
5655 metrics.recording.level = INFO
5656 metrics.sample.window.ms = 30000
5657 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
5658 receive.buffer.bytes = 65536
5659 reconnect.backoff.max.ms = 1000
5660 reconnect.backoff.ms = 50
5661 request.timeout.ms = 30000
5662 retry.backoff.max.ms = 1000
5663 retry.backoff.ms = 100
5664 sasl.client.callback.handler.class = null
5665 sasl.jaas.config = null
5666 sasl.kerberos.kinit.cmd = /usr/bin/kinit
5667 sasl.kerberos.min.time.before.relogin = 60000
5668 sasl.kerberos.service.name = null
5669 sasl.kerberos.ticket.renew.jitter = 0.05
5670 sasl.kerberos.ticket.renew.window.factor = 0.8
5671 sasl.login.callback.handler.class = null
5672 sasl.login.class = null
5673 sasl.login.connect.timeout.ms = null
5674 sasl.login.read.timeout.ms = null
5675 sasl.login.refresh.buffer.seconds = 300
5676 sasl.login.refresh.min.period.seconds = 60
5677 sasl.login.refresh.window.factor = 0.8
5678 sasl.login.refresh.window.jitter = 0.05
5679 sasl.login.retry.backoff.max.ms = 10000
5680 sasl.login.retry.backoff.ms = 100
5681 sasl.mechanism = GSSAPI
5682 sasl.oauthbearer.assertion.algorithm = RS256
5683 sasl.oauthbearer.assertion.claim.aud = null
5684 sasl.oauthbearer.assertion.claim.exp.seconds = 300
5685 sasl.oauthbearer.assertion.claim.iss = null
5686 sasl.oauthbearer.assertion.claim.jti.include = false
5687 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5688 sasl.oauthbearer.assertion.claim.sub = null
5689 sasl.oauthbearer.assertion.file = null
5690 sasl.oauthbearer.assertion.private.key.file = null
5691 sasl.oauthbearer.assertion.private.key.passphrase = null
5692 sasl.oauthbearer.assertion.template.file = null
5693 sasl.oauthbearer.client.credentials.client.id = null
5694 sasl.oauthbearer.client.credentials.client.secret = null
5695 sasl.oauthbearer.clock.skew.seconds = 30
5696 sasl.oauthbearer.expected.audience = null
5697 sasl.oauthbearer.expected.issuer = null
5698 sasl.oauthbearer.header.urlencode = false
5699 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5700 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5701 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5702 sasl.oauthbearer.jwks.endpoint.url = null
5703 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5704 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5705 sasl.oauthbearer.scope = null
5706 sasl.oauthbearer.scope.claim.name = scope
5707 sasl.oauthbearer.sub.claim.name = sub
5708 sasl.oauthbearer.token.endpoint.url = null
5709 security.protocol = PLAINTEXT
5710 security.providers = null
5711 send.buffer.bytes = 131072
5712 session.timeout.ms = 45000
5713 share.acknowledgement.mode = implicit
5714 socket.connection.setup.timeout.max.ms = 30000
5715 socket.connection.setup.timeout.ms = 10000
5716 ssl.cipher.suites = null
5717 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5718 ssl.endpoint.identification.algorithm = https
5719 ssl.engine.factory.class = null
5720 ssl.key.password = null
5721 ssl.keymanager.algorithm = SunX509
5722 ssl.keystore.certificate.chain = null
5723 ssl.keystore.key = null
5724 ssl.keystore.location = null
5725 ssl.keystore.password = null
5726 ssl.keystore.type = JKS
5727 ssl.protocol = TLSv1.3
5728 ssl.provider = null
5729 ssl.secure.random.implementation = null
5730 ssl.trustmanager.algorithm = PKIX
5731 ssl.truststore.certificates = null
5732 ssl.truststore.location = null
5733 ssl.truststore.password = null
5734 ssl.truststore.type = JKS
5735 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
5736
573714:30:19.867 [virtual-652] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
573814:30:19.869 [virtual-652] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
573914:30:19.870 [virtual-652] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
574014:30:19.870 [virtual-652] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855019869
574114:30:19.870 [virtual-659] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Subscribed to topic(s): t3_1
574214:30:19.873 [virtual-659] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
574314:30:19.873 [virtual-659] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
574414:30:19.874 [virtual-659] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] (Re-)joining group
574514:30:19.875 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_2 in Empty state. Created a new member id consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87 and requesting the member to rejoin with this id.
574614:30:19.876 [virtual-659] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Request joining group due to: need to re-join with the given member-id: consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87
574714:30:19.876 [virtual-659] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] (Re-)joining group
574814:30:19.877 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87 joins group g3_2 in Empty state. Adding to the group now.
574914:30:19.877 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87 with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87).
575014:30:22.877 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_2 generation 1 with 1 members.
575114:30:22.878 [virtual-659] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87', protocol='range'}
575214:30:22.878 [virtual-659] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Finished assignment for group at generation 1: {consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87=Assignment(partitions=[t3_1-0])}
575314:30:22.878 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87 for group g3_2 for generation 1. The group has 1 members, 0 of which are static.
575414:30:22.884 [virtual-659] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87', protocol='range'}
575514:30:22.885 [virtual-659] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Notifying assignor about the new Assignment(partitions=[t3_1-0])
575614:30:22.885 [virtual-659] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Adding newly assigned partitions: [t3_1-0]
575714:30:22.886 [virtual-659] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Found no committed offset for partition t3_1-0
575814:30:22.888 [virtual-659] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Resetting offset for partition t3_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
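
The two groups above illustrate the standard offset semantics: g3_1 resumes from its committed offset (FetchPosition offset=3 for t3_1-0), while the fresh group g3_2 finds no committed offset and falls back to auto.offset.reset=earliest, starting from offset 0. Because enable.auto.commit=false here, the committed position only advances on an explicit commit; a sketch, continuing the illustrative consumer above:

    import java.time.Duration
    import scala.jdk.CollectionConverters.*

    // With enable.auto.commit=false, progress is persisted only on explicit commit.
    val polled = consumer.poll(Duration.ofMillis(500))
    polled.asScala.foreach(r => println(s"${r.topic}-${r.partition}@${r.offset}: ${r.value}"))
    consumer.commitSync() // a later consumer in the same group resumes after these offsets
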
575914:30:22.890 [virtual-658] ERROR o.k.KafkaFlow$ - Exception when polling for records
5760java.lang.InterruptedException: null
5761 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
5762 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
5763 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
5764 at ox.channels.ActorRef.ask(actor.scala:64)
5765 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
5766 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
5767 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5768 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5769 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
5770 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
5771 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
5772 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5773 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5774 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
577514:30:22.890 [virtual-659] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
5776java.lang.InterruptedException: null
5777 ... 18 common frames omitted
5778Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
5779 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
5780 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
5781 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
5782 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
5783 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
5784 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
5785 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
5786 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
5787 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
5788 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
5789 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5790 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5791 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
5792 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
5793 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
5794 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5795 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5796 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
579714:30:22.891 [virtual-654] ERROR o.k.KafkaFlow$ - Exception when polling for records
5798java.lang.InterruptedException: null
5799 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
5800 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
5801 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
5802 at ox.channels.ActorRef.ask(actor.scala:64)
5803 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
5804 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
5805 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5806 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5807 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
5808 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
5809 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
5810 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5811 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5812 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
581314:30:22.891 [virtual-655] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
5814java.lang.InterruptedException: null
5815 ... 18 common frames omitted
5816Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
5817 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
5818 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
5819 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
5820 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
5821 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
5822 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
5823 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
5824 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
5825 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
5826 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
5827 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5828 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5829 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
5830 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
5831 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
5832 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5833 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5834 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
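
The InterruptedException / InterruptException ERRORs above are the test's shutdown path rather than a stream failure: when the supervised scope running KafkaFlow ends, ox interrupts the virtual threads backing its forks, the blocked CompletableFuture.get inside ActorRef.ask throws InterruptedException, and KafkaConsumer.poll surfaces the same interrupt as org.apache.kafka.common.errors.InterruptException. A minimal sketch of that semantics, assuming ox's public supervised/fork API (the Kafka specifics are elided and the poller is a stand-in):

    import ox.{fork, supervised}

    supervised:
      val poller = fork: // daemon fork, stand-in for the polling actor
        try while true do Thread.sleep(100) // stand-in for a blocking consumer.poll loop
        catch case _: InterruptedException => println("interrupted on scope shutdown")
      ()
    // When the scope body completes, remaining daemon forks are interrupted,
    // which is what produces the ERROR lines logged above.
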
583514:30:22.891 [virtual-661] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Revoke previously assigned partitions [t3_1-0]
583614:30:22.892 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Member consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
583714:30:22.892 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Resetting generation and member id due to: consumer pro-actively leaving the group
583814:30:22.892 [virtual-661] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Request joining group due to: consumer pro-actively leaving the group
583914:30:22.892 [virtual-662] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Revoke previously assigned partitions [t3_1-0]
584014:30:22.893 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Member consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
584114:30:22.893 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
584214:30:22.893 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
584314:30:22.894 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
584414:30:22.894 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g3_1-5-5742e31a-9ff1-4422-b6a3-a3ce821418e4) members.).
584514:30:22.894 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_1 with generation 4 is now empty.
584614:30:22.894 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_2] Member consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
584714:30:22.894 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g3_2-6-0f37c067-6124-4fd4-b0d8-11dae3d03a87) members.).
584814:30:22.894 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_2 with generation 2 is now empty.
584914:30:23.378 [virtual-662] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
585014:30:23.378 [virtual-662] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
585114:30:23.379 [virtual-662] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
585214:30:23.379 [virtual-662] INFO o.a.k.c.m.Metrics - Metrics reporters closed
585314:30:23.381 [virtual-662] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-5 unregistered
585414:30:23.392 [virtual-661] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
585514:30:23.392 [virtual-661] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
585614:30:23.392 [virtual-661] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
585714:30:23.392 [virtual-661] INFO o.a.k.c.m.Metrics - Metrics reporters closed
585814:30:23.394 [virtual-661] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_2-6 unregistered
585914:30:23.397 [virtual-663] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
5860 acks = -1
5861 batch.size = 16384
5862 bootstrap.servers = [localhost:6001]
5863 buffer.memory = 33554432
5864 client.dns.lookup = use_all_dns_ips
5865 client.id = producer-11
5866 compression.gzip.level = -1
5867 compression.lz4.level = 9
5868 compression.type = none
5869 compression.zstd.level = 3
5870 connections.max.idle.ms = 540000
5871 delivery.timeout.ms = 120000
5872 enable.idempotence = true
5873 enable.metrics.push = true
5874 interceptor.classes = []
5875 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
5876 linger.ms = 5
5877 max.block.ms = 60000
5878 max.in.flight.requests.per.connection = 5
5879 max.request.size = 1048576
5880 metadata.max.age.ms = 300000
5881 metadata.max.idle.ms = 300000
5882 metadata.recovery.rebootstrap.trigger.ms = 300000
5883 metadata.recovery.strategy = rebootstrap
5884 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
5885 metrics.num.samples = 2
5886 metrics.recording.level = INFO
5887 metrics.sample.window.ms = 30000
5888 partitioner.adaptive.partitioning.enable = true
5889 partitioner.availability.timeout.ms = 0
5890 partitioner.class = null
5891 partitioner.ignore.keys = false
5892 receive.buffer.bytes = 32768
5893 reconnect.backoff.max.ms = 1000
5894 reconnect.backoff.ms = 50
5895 request.timeout.ms = 30000
5896 retries = 2147483647
5897 retry.backoff.max.ms = 1000
5898 retry.backoff.ms = 100
5899 sasl.client.callback.handler.class = null
5900 sasl.jaas.config = null
5901 sasl.kerberos.kinit.cmd = /usr/bin/kinit
5902 sasl.kerberos.min.time.before.relogin = 60000
5903 sasl.kerberos.service.name = null
5904 sasl.kerberos.ticket.renew.jitter = 0.05
5905 sasl.kerberos.ticket.renew.window.factor = 0.8
5906 sasl.login.callback.handler.class = null
5907 sasl.login.class = null
5908 sasl.login.connect.timeout.ms = null
5909 sasl.login.read.timeout.ms = null
5910 sasl.login.refresh.buffer.seconds = 300
5911 sasl.login.refresh.min.period.seconds = 60
5912 sasl.login.refresh.window.factor = 0.8
5913 sasl.login.refresh.window.jitter = 0.05
5914 sasl.login.retry.backoff.max.ms = 10000
5915 sasl.login.retry.backoff.ms = 100
5916 sasl.mechanism = GSSAPI
5917 sasl.oauthbearer.assertion.algorithm = RS256
5918 sasl.oauthbearer.assertion.claim.aud = null
5919 sasl.oauthbearer.assertion.claim.exp.seconds = 300
5920 sasl.oauthbearer.assertion.claim.iss = null
5921 sasl.oauthbearer.assertion.claim.jti.include = false
5922 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5923 sasl.oauthbearer.assertion.claim.sub = null
5924 sasl.oauthbearer.assertion.file = null
5925 sasl.oauthbearer.assertion.private.key.file = null
5926 sasl.oauthbearer.assertion.private.key.passphrase = null
5927 sasl.oauthbearer.assertion.template.file = null
5928 sasl.oauthbearer.client.credentials.client.id = null
5929 sasl.oauthbearer.client.credentials.client.secret = null
5930 sasl.oauthbearer.clock.skew.seconds = 30
5931 sasl.oauthbearer.expected.audience = null
5932 sasl.oauthbearer.expected.issuer = null
5933 sasl.oauthbearer.header.urlencode = false
5934 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5935 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5936 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5937 sasl.oauthbearer.jwks.endpoint.url = null
5938 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5939 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5940 sasl.oauthbearer.scope = null
5941 sasl.oauthbearer.scope.claim.name = scope
5942 sasl.oauthbearer.sub.claim.name = sub
5943 sasl.oauthbearer.token.endpoint.url = null
5944 security.protocol = PLAINTEXT
5945 security.providers = null
5946 send.buffer.bytes = 131072
5947 socket.connection.setup.timeout.max.ms = 30000
5948 socket.connection.setup.timeout.ms = 10000
5949 ssl.cipher.suites = null
5950 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5951 ssl.endpoint.identification.algorithm = https
5952 ssl.engine.factory.class = null
5953 ssl.key.password = null
5954 ssl.keymanager.algorithm = SunX509
5955 ssl.keystore.certificate.chain = null
5956 ssl.keystore.key = null
5957 ssl.keystore.location = null
5958 ssl.keystore.password = null
5959 ssl.keystore.type = JKS
5960 ssl.protocol = TLSv1.3
5961 ssl.provider = null
5962 ssl.secure.random.implementation = null
5963 ssl.trustmanager.algorithm = PKIX
5964 ssl.truststore.certificates = null
5965 ssl.truststore.location = null
5966 ssl.truststore.password = null
5967 ssl.truststore.type = JKS
5968 transaction.timeout.ms = 60000
5969 transaction.two.phase.commit.enable = false
5970 transactional.id = null
5971 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
5972
597314:30:23.398 [virtual-663] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
597414:30:23.398 [virtual-663] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-11] Instantiated an idempotent producer.
597514:30:23.400 [virtual-663] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
597614:30:23.400 [virtual-663] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
597714:30:23.400 [virtual-663] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855023400
597814:30:23.403 [data-plane-kafka-request-handler-5] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t4) to the active controller.
597914:30:23.404 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t4', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
598014:30:23.404 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t4 with topic ID Xriy1_bXQni4qVus-eAtmQ.
598114:30:23.404 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t4-0 with topic ID Xriy1_bXQni4qVus-eAtmQ and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
598214:30:23.405 [kafka-producer-network-thread | producer-11] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-11] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t4=UNKNOWN_TOPIC_OR_PARTITION}
598314:30:23.405 [kafka-producer-network-thread | producer-11] INFO o.a.k.c.Metadata - [Producer clientId=producer-11] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
598414:30:23.405 [kafka-producer-network-thread | producer-11] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-11] ProducerId set to 10 with epoch 0
598514:30:23.430 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
598614:30:23.431 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t4-0)
598714:30:23.431 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t4-0 with topic id Xriy1_bXQni4qVus-eAtmQ.
598814:30:23.433 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t4-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
598914:30:23.434 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t4-0 in /tmp/kafka-logs7572554700115704093/t4-0 with properties {}
599014:30:23.434 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t4-0 broker=0] No checkpointed highwatermark is found for partition t4-0
599114:30:23.434 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t4-0 broker=0] Log loaded for partition t4-0 with initial high watermark 0
599214:30:23.435 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t4-0 with topic id Some(Xriy1_bXQni4qVus-eAtmQ) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
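
The UNKNOWN_TOPIC_OR_PARTITION warning above is the expected, transient side of broker-side auto-creation: the producer's first metadata request for t4 races with the controller creating the topic, and the client retries until the new partition appears (as it does a few lines later). Tests that want to avoid that race can create topics explicitly up front; a sketch using the standard Admin client (names illustrative):

    import java.util.Properties
    import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}

    val adminProps = new Properties()
    adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    val admin = Admin.create(adminProps)
    // 1 partition, replication factor 1, matching the embedded single-broker setup
    try admin.createTopics(java.util.List.of(new NewTopic("t4", 1, 1.toShort))).all().get()
    finally admin.close()
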
599314:30:23.524 [virtual-667] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-11] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
599414:30:23.528 [virtual-667] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
599514:30:23.529 [virtual-667] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
599614:30:23.529 [virtual-667] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
599714:30:23.529 [virtual-667] INFO o.a.k.c.m.Metrics - Metrics reporters closed
599814:30:23.529 [virtual-667] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-11 unregistered
599914:30:23.530 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
6000 allow.auto.create.topics = true
6001 auto.commit.interval.ms = 5000
6002 auto.offset.reset = earliest
6003 bootstrap.servers = [localhost:6001]
6004 check.crcs = true
6005 client.dns.lookup = use_all_dns_ips
6006 client.id = consumer-embedded-kafka-spec-7
6007 client.rack =
6008 connections.max.idle.ms = 540000
6009 default.api.timeout.ms = 60000
6010 enable.auto.commit = false
6011 enable.metrics.push = true
6012 exclude.internal.topics = true
6013 fetch.max.bytes = 52428800
6014 fetch.max.wait.ms = 500
6015 fetch.min.bytes = 1
6016 group.id = embedded-kafka-spec
6017 group.instance.id = null
6018 group.protocol = classic
6019 group.remote.assignor = null
6020 heartbeat.interval.ms = 3000
6021 interceptor.classes = []
6022 internal.leave.group.on.close = true
6023 internal.throw.on.fetch.stable.offset.unsupported = false
6024 isolation.level = read_uncommitted
6025 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
6026 max.partition.fetch.bytes = 1048576
6027 max.poll.interval.ms = 300000
6028 max.poll.records = 500
6029 metadata.max.age.ms = 300000
6030 metadata.recovery.rebootstrap.trigger.ms = 300000
6031 metadata.recovery.strategy = rebootstrap
6032 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
6033 metrics.num.samples = 2
6034 metrics.recording.level = INFO
6035 metrics.sample.window.ms = 30000
6036 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
6037 receive.buffer.bytes = 65536
6038 reconnect.backoff.max.ms = 1000
6039 reconnect.backoff.ms = 50
6040 request.timeout.ms = 30000
6041 retry.backoff.max.ms = 1000
6042 retry.backoff.ms = 100
6043 sasl.client.callback.handler.class = null
6044 sasl.jaas.config = null
6045 sasl.kerberos.kinit.cmd = /usr/bin/kinit
6046 sasl.kerberos.min.time.before.relogin = 60000
6047 sasl.kerberos.service.name = null
6048 sasl.kerberos.ticket.renew.jitter = 0.05
6049 sasl.kerberos.ticket.renew.window.factor = 0.8
6050 sasl.login.callback.handler.class = null
6051 sasl.login.class = null
6052 sasl.login.connect.timeout.ms = null
6053 sasl.login.read.timeout.ms = null
6054 sasl.login.refresh.buffer.seconds = 300
6055 sasl.login.refresh.min.period.seconds = 60
6056 sasl.login.refresh.window.factor = 0.8
6057 sasl.login.refresh.window.jitter = 0.05
6058 sasl.login.retry.backoff.max.ms = 10000
6059 sasl.login.retry.backoff.ms = 100
6060 sasl.mechanism = GSSAPI
6061 sasl.oauthbearer.assertion.algorithm = RS256
6062 sasl.oauthbearer.assertion.claim.aud = null
6063 sasl.oauthbearer.assertion.claim.exp.seconds = 300
6064 sasl.oauthbearer.assertion.claim.iss = null
6065 sasl.oauthbearer.assertion.claim.jti.include = false
6066 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
6067 sasl.oauthbearer.assertion.claim.sub = null
6068 sasl.oauthbearer.assertion.file = null
6069 sasl.oauthbearer.assertion.private.key.file = null
6070 sasl.oauthbearer.assertion.private.key.passphrase = null
6071 sasl.oauthbearer.assertion.template.file = null
6072 sasl.oauthbearer.client.credentials.client.id = null
6073 sasl.oauthbearer.client.credentials.client.secret = null
6074 sasl.oauthbearer.clock.skew.seconds = 30
6075 sasl.oauthbearer.expected.audience = null
6076 sasl.oauthbearer.expected.issuer = null
6077 sasl.oauthbearer.header.urlencode = false
6078 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
6079 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
6080 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
6081 sasl.oauthbearer.jwks.endpoint.url = null
6082 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
6083 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
6084 sasl.oauthbearer.scope = null
6085 sasl.oauthbearer.scope.claim.name = scope
6086 sasl.oauthbearer.sub.claim.name = sub
6087 sasl.oauthbearer.token.endpoint.url = null
6088 security.protocol = PLAINTEXT
6089 security.providers = null
6090 send.buffer.bytes = 131072
6091 session.timeout.ms = 45000
6092 share.acknowledgement.mode = implicit
6093 socket.connection.setup.timeout.max.ms = 30000
6094 socket.connection.setup.timeout.ms = 10000
6095 ssl.cipher.suites = null
6096 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
6097 ssl.endpoint.identification.algorithm = https
6098 ssl.engine.factory.class = null
6099 ssl.key.password = null
6100 ssl.keymanager.algorithm = SunX509
6101 ssl.keystore.certificate.chain = null
6102 ssl.keystore.key = null
6103 ssl.keystore.location = null
6104 ssl.keystore.password = null
6105 ssl.keystore.type = JKS
6106 ssl.protocol = TLSv1.3
6107 ssl.provider = null
6108 ssl.secure.random.implementation = null
6109 ssl.trustmanager.algorithm = PKIX
6110 ssl.truststore.certificates = null
6111 ssl.truststore.location = null
6112 ssl.truststore.password = null
6113 ssl.truststore.type = JKS
6114 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
6115
611614:30:23.530 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
611714:30:23.534 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
611814:30:23.534 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
611914:30:23.534 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855023534
612014:30:23.534 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Subscribed to topic(s): t4
612114:30:23.537 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
612214:30:23.540 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
612314:30:23.540 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] (Re-)joining group
612414:30:23.542 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group embedded-kafka-spec in Empty state. Created a new member id consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff and requesting the member to rejoin with this id.
612514:30:23.542 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Request joining group due to: need to re-join with the given member-id: consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff
612614:30:23.543 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] (Re-)joining group
612714:30:23.543 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff joins group embedded-kafka-spec in Empty state. Adding to the group now.
612814:30:23.543 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff with group instance id null; client reason: need to re-join with the given member-id: consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff).
612914:30:26.544 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group embedded-kafka-spec generation 3 with 1 members.
613014:30:26.544 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Successfully joined group with generation Generation{generationId=3, memberId='consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff', protocol='range'}
613114:30:26.545 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Finished assignment for group at generation 3: {consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff=Assignment(partitions=[t4-0])}
14:30:26.545 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff for group embedded-kafka-spec for generation 3. The group has 1 members, 0 of which are static.
14:30:26.550 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Successfully synced group in generation Generation{generationId=3, memberId='consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff', protocol='range'}
14:30:26.550 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Notifying assignor about the new Assignment(partitions=[t4-0])
14:30:26.550 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Adding newly assigned partitions: [t4-0]
14:30:26.551 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Found no committed offset for partition t4-0
14:30:26.552 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Resetting offset for partition t4-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
14:30:26.573 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Revoke previously assigned partitions [t4-0]
14:30:26.573 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Member consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:30:26.573 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Resetting generation and member id due to: consumer pro-actively leaving the group
14:30:26.573 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Request joining group due to: consumer pro-actively leaving the group
14:30:26.573 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group embedded-kafka-spec] Member consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:30:26.574 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-embedded-kafka-spec-7-947088a0-e888-45fe-b506-5e468737f5ff) members.).
14:30:26.574 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group embedded-kafka-spec with generation 4 is now empty.
14:30:27.056 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:27.056 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:27.057 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:27.057 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:27.058 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-embedded-kafka-spec-7 unregistered
14:30:27.060 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-12
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

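----
For readers reproducing this locally: the non-default values in the dump above (the embedded broker at localhost:6001, idempotence on, string serializers, linger.ms=5, max.block.ms=10000) correspond to a client configuration roughly like the following. This is a minimal sketch against the plain Apache Kafka client API, not the test's actual setup code; the topic name t5_1 is taken from the log below, the key/value literals are illustrative.

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") // logged: enable.idempotence = true
props.put(ProducerConfig.LINGER_MS_CONFIG, "5")
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000")
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)

val producer = new KafkaProducer[String, String](props)
try producer.send(new ProducerRecord("t5_1", "key", "value")).get() // block until acked (acks = -1)
finally producer.close()

With a single-broker embedded cluster, acks = -1 ("all") is equivalent to acks = 1, which is why the send completes as soon as the leader appends the record.
----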
14:30:27.061 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:27.061 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-12] Instantiated an idempotent producer.
14:30:27.063 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:27.063 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:27.063 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855027063
14:30:27.065 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t5_1) to the active controller.
14:30:27.066 [kafka-producer-network-thread | producer-12] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-12] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t5_1=UNKNOWN_TOPIC_OR_PARTITION}
14:30:27.066 [kafka-producer-network-thread | producer-12] INFO o.a.k.c.Metadata - [Producer clientId=producer-12] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:27.066 [kafka-producer-network-thread | producer-12] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-12] ProducerId set to 11 with epoch 0
14:30:27.066 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t5_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
14:30:27.067 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t5_1 with topic ID 6xP391rBQVWgDemcFEW6gg.
14:30:27.067 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t5_1-0 with topic ID 6xP391rBQVWgDemcFEW6gg and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
14:30:27.093 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
14:30:27.093 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t5_1-0)
14:30:27.093 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t5_1-0 with topic id 6xP391rBQVWgDemcFEW6gg.
14:30:27.095 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t5_1-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
14:30:27.096 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t5_1-0 in /tmp/kafka-logs7572554700115704093/t5_1-0 with properties {}
14:30:27.096 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_1-0 broker=0] No checkpointed highwatermark is found for partition t5_1-0
14:30:27.096 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_1-0 broker=0] Log loaded for partition t5_1-0 with initial high watermark 0
14:30:27.096 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t5_1-0 with topic id Some(6xP391rBQVWgDemcFEW6gg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
14:30:28.078 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-12] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:28.079 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:28.079 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:28.079 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:28.079 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:28.080 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-12 unregistered
14:30:28.080 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-13
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

14:30:28.080 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:28.081 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-13] Instantiated an idempotent producer.
14:30:28.083 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:28.083 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:28.083 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855028083
14:30:28.085 [kafka-producer-network-thread | producer-13] INFO o.a.k.c.Metadata - [Producer clientId=producer-13] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:28.085 [kafka-producer-network-thread | producer-13] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-13] ProducerId set to 12 with epoch 0
14:30:28.094 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-13] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:28.095 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:28.095 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:28.095 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:28.095 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:28.095 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-13 unregistered
14:30:28.096 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-14
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

14:30:28.096 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:28.096 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-14] Instantiated an idempotent producer.
14:30:28.098 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:28.098 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:28.098 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855028098
14:30:28.100 [kafka-producer-network-thread | producer-14] INFO o.a.k.c.Metadata - [Producer clientId=producer-14] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:28.101 [kafka-producer-network-thread | producer-14] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-14] ProducerId set to 13 with epoch 0
14:30:28.109 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-14] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:28.110 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:28.111 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:28.111 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:28.111 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:28.111 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-14 unregistered
14:30:28.112 [virtual-672] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_1-8
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

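----
The two consumers configured here (this dump and the next) join the same group g5_1, start from the earliest offset, and have auto-commit disabled, i.e. a manual-commit setup. For reference, that corresponds to roughly the following against the plain Kafka client API — a sketch only, not the test's code; the topic name t5_2 is taken from the subscription logged below.

import java.time.Duration
import java.util.Properties
import scala.jdk.CollectionConverters.*
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer

val props = new Properties()
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g5_1")
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)

val consumer = new KafkaConsumer[String, String](props)
consumer.subscribe(List("t5_2").asJava)
val records = consumer.poll(Duration.ofSeconds(1)) // first poll triggers the group join/rebalance seen below
records.asScala.foreach(r => println(s"${r.topic}-${r.partition}@${r.offset}: ${r.value}"))
consumer.commitSync() // enable.auto.commit = false, so offsets must be committed explicitly
consumer.close()

Because partition.assignment.strategy lists RangeAssignor first and each topic has a single partition, the range assignment that stabilizes below gives each group member exactly one partition.
----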
14:30:28.112 [virtual-674] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_1-9
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

14:30:28.112 [virtual-672] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:28.112 [virtual-674] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:28.115 [virtual-674] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:28.115 [virtual-674] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:28.115 [virtual-674] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855028115
14:30:28.117 [virtual-672] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:28.117 [virtual-672] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:28.117 [virtual-672] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855028117
14:30:28.118 [virtual-678] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Subscribed to topic(s): t5_2
14:30:28.118 [virtual-674] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-15
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 60000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

14:30:28.119 [virtual-674] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:28.119 [virtual-674] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-15] Instantiated an idempotent producer.
14:30:28.122 [virtual-674] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:28.122 [virtual-674] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:28.122 [virtual-674] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855028122
14:30:28.122 [data-plane-kafka-request-handler-2] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t5_2) to the active controller.
14:30:28.124 [virtual-678] WARN o.a.k.c.NetworkClient - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] The metadata response from the cluster reported a recoverable issue with correlation id 2 : {t5_2=UNKNOWN_TOPIC_OR_PARTITION}
14:30:28.124 [virtual-678] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:28.124 [virtual-675] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Subscribed to topic(s): t5_1
14:30:28.124 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t5_2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
14:30:28.124 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:30:28.124 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t5_2 with topic ID qqkNnflcQXegpvm0Mj8z9Q.
14:30:28.125 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t5_2-0 with topic ID qqkNnflcQXegpvm0Mj8z9Q and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
14:30:28.125 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] (Re-)joining group
14:30:28.127 [kafka-producer-network-thread | producer-15] INFO o.a.k.c.Metadata - [Producer clientId=producer-15] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:28.127 [virtual-675] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:28.127 [kafka-producer-network-thread | producer-15] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-15] ProducerId set to 14 with epoch 0
14:30:28.127 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:30:28.128 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] (Re-)joining group
14:30:28.128 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in Empty state. Created a new member id consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b and requesting the member to rejoin with this id.
14:30:28.128 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b
14:30:28.129 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] (Re-)joining group
14:30:28.129 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b joins group g5_1 in Empty state. Adding to the group now.
14:30:28.129 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b).
14:30:28.130 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in PreparingRebalance state. Created a new member id consumer-g5_1-9-876f4807-51a5-4fa0-a586-3955cabb3800 and requesting the member to rejoin with this id.
14:30:28.131 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-9-876f4807-51a5-4fa0-a586-3955cabb3800
14:30:28.131 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] (Re-)joining group
14:30:28.131 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-9-876f4807-51a5-4fa0-a586-3955cabb3800 joins group g5_1 in PreparingRebalance state. Adding to the group now.
14:30:28.150 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
14:30:28.151 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t5_2-0)
14:30:28.151 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t5_2-0 with topic id qqkNnflcQXegpvm0Mj8z9Q.
14:30:28.153 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t5_2-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
14:30:28.154 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t5_2-0 in /tmp/kafka-logs7572554700115704093/t5_2-0 with properties {}
14:30:28.154 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_2-0 broker=0] No checkpointed highwatermark is found for partition t5_2-0
14:30:28.154 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_2-0 broker=0] Log loaded for partition t5_2-0 with initial high watermark 0
14:30:28.154 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t5_2-0 with topic id Some(qqkNnflcQXegpvm0Mj8z9Q) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
14:30:34.130 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_1 generation 1 with 2 members.
14:30:34.131 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_1-9-876f4807-51a5-4fa0-a586-3955cabb3800', protocol='range'}
14:30:34.131 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b', protocol='range'}
14:30:34.133 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Finished assignment for group at generation 1: {consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b=Assignment(partitions=[t5_2-0]), consumer-g5_1-9-876f4807-51a5-4fa0-a586-3955cabb3800=Assignment(partitions=[t5_1-0])}
14:30:34.133 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b for group g5_1 for generation 1. The group has 2 members, 0 of which are static.
14:30:34.140 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b', protocol='range'}
14:30:34.140 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_1-9-876f4807-51a5-4fa0-a586-3955cabb3800', protocol='range'}
14:30:34.140 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_2-0])
14:30:34.140 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_1-0])
14:30:34.140 [virtual-675] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Adding newly assigned partitions: [t5_1-0]
14:30:34.140 [virtual-678] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Adding newly assigned partitions: [t5_2-0]
14:30:34.141 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Found no committed offset for partition t5_2-0
14:30:34.141 [virtual-675] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Found no committed offset for partition t5_1-0
14:30:34.143 [virtual-678] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Resetting offset for partition t5_2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
14:30:34.145 [virtual-675] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Resetting offset for partition t5_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
695214:30:36.159 [virtual-675] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
6953java.lang.InterruptedException: null
6954 ... 18 common frames omitted
6955Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
6956 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
6957 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
6958 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
6959 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
6960 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
6961 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
6962 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
6963 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
6964 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
6965 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
6966 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
6967 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
6968 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
6969 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
6970 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
6971 at scala.Function0.apply$mcV$sp(Function0.scala:45)
6972 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
6973 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
697414:30:36.159 [virtual-681] ERROR o.k.KafkaFlow$ - Exception when polling for records
6975java.lang.InterruptedException: null
6976 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
6977 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
6978 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
6979 at ox.channels.ActorRef.ask(actor.scala:64)
6980 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
6981 at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
6982 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
6983 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
6984 at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
6985 at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
6986 at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
6987 at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
6988 at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
6989 at scala.Function0.apply$mcV$sp(Function0.scala:45)
6990 at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
6991 at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
6992 at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
6993 at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
6994 at scala.Function0.apply$mcV$sp(Function0.scala:45)
6995 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
6996 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
699714:30:36.159 [virtual-678] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
6998java.lang.InterruptedException: null
6999 ... 18 common frames omitted
7000Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
7001 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
7002 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
7003 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
7004 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
7005 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
7006 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
7007 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
7008 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
7009 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
7010 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
7011 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
7012 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
7013 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
7014 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
7015 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
7016 at scala.Function0.apply$mcV$sp(Function0.scala:45)
7017 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
7018 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:30:36.159 [virtual-688] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-15] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:36.159 [virtual-677] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
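
The second trace shows ox's actor ask pattern: ox.channels.ActorRef.ask blocks the caller on a CompletableFuture until the actor's closure has run, so interrupting the caller surfaces as InterruptedException inside get(). A simplified, hypothetical illustration of that pattern (not ox's actual implementation):

import java.util.concurrent.{CompletableFuture, LinkedBlockingQueue}

// Mini-actor sketch: requests are closures executed sequentially on the
// actor's own virtual thread; ask parks the caller on a future.
class MiniActor[T](state: T):
  private val inbox = new LinkedBlockingQueue[T => Unit]()
  private val runner = Thread.ofVirtual().start { () =>
    while true do inbox.take()(state) // run queued closures against the state
  }
  def ask[U](f: T => U): U =
    val result = new CompletableFuture[U]()
    inbox.put(t => result.complete(f(t)))
    result.get() // an interrupt while waiting throws InterruptedException, as logged
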
14:30:36.160 [virtual-689] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Revoke previously assigned partitions [t5_2-0]
14:30:36.161 [virtual-689] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Member consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:30:36.161 [virtual-689] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
14:30:36.161 [virtual-689] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
14:30:36.161 [virtual-687] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Revoke previously assigned partitions [t5_1-0]
14:30:36.162 [virtual-687] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Member consumer-g5_1-9-876f4807-51a5-4fa0-a586-3955cabb3800 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:30:36.162 [virtual-688] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:36.162 [virtual-687] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
14:30:36.162 [virtual-687] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
14:30:36.162 [virtual-688] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:36.162 [virtual-688] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:36.162 [virtual-688] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:36.163 [virtual-688] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-15 unregistered
14:30:36.163 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:30:36.163 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g5_1-8-a27293e3-29aa-432b-8fc1-a3d17be62f7b) members.).
14:30:36.163 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-9-876f4807-51a5-4fa0-a586-3955cabb3800 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:30:36.163 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_1 with generation 2 is now empty.
14:30:36.165 [virtual-689] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:36.165 [virtual-689] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:36.165 [virtual-689] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:36.165 [virtual-689] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:36.167 [virtual-689] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-8 unregistered
14:30:36.657 [virtual-687] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:36.657 [virtual-687] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:36.657 [virtual-687] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:36.657 [virtual-687] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:36.659 [virtual-687] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-9 unregistered
14:30:36.660 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-16
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

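Most of the dump above matches stock producer defaults; a few values look test-specific (linger.ms = 5 instead of 0, max.block.ms = 10000 instead of 60000, retry.backoff.ms = 1000 instead of 100, and the localhost:6001 embedded broker). A plain-client construction with just those overrides, as a sketch:

// Sketch: rebuilding the apparently non-default part of the dumped producer
// config with the plain Kafka client; everything else is left at its default.
import java.util.Properties
import org.apache.kafka.clients.producer.KafkaProducer

@main def producerConfigSketch(): Unit =
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:6001") // embedded test broker
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("linger.ms", "5")        // small batching window instead of the default 0
  props.put("max.block.ms", "10000") // fail sends after 10 s if metadata or buffer space is unavailable
  props.put("retry.backoff.ms", "1000")
  val producer = new KafkaProducer[String, String](props)
  producer.close()
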
14:30:36.660 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:36.660 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-16] Instantiated an idempotent producer.
14:30:36.662 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:36.662 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:36.662 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855036662
14:30:36.664 [kafka-producer-network-thread | producer-16] INFO o.a.k.c.Metadata - [Producer clientId=producer-16] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:36.664 [kafka-producer-network-thread | producer-16] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-16] ProducerId set to 15 with epoch 0
14:30:36.672 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-16] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:36.674 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:36.674 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:36.674 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:36.674 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:36.674 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-16 unregistered
14:30:36.675 [virtual-692] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_1-10
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

14:30:36.675 [virtual-692] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:36.678 [virtual-692] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:36.678 [virtual-692] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:36.678 [virtual-692] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855036678
14:30:36.679 [virtual-695] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Subscribed to topic(s): t5_1
14:30:36.681 [virtual-695] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:36.682 [virtual-695] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:30:36.683 [virtual-695] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] (Re-)joining group
14:30:36.686 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in Empty state. Created a new member id consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab and requesting the member to rejoin with this id.
14:30:36.686 [virtual-695] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab
14:30:36.686 [virtual-695] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] (Re-)joining group
14:30:36.686 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab joins group g5_1 in Empty state. Adding to the group now.
14:30:36.687 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab).
14:30:39.687 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_1 generation 3 with 1 members.
14:30:39.687 [virtual-695] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab', protocol='range'}
14:30:39.688 [virtual-695] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Finished assignment for group at generation 3: {consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab=Assignment(partitions=[t5_1-0])}
14:30:39.688 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab for group g5_1 for generation 3. The group has 1 members, 0 of which are static.
14:30:39.694 [virtual-695] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab', protocol='range'}
14:30:39.695 [virtual-695] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_1-0])
14:30:39.695 [virtual-695] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Adding newly assigned partitions: [t5_1-0]
14:30:39.696 [virtual-695] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t5_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
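
The join/sync/offset sequence above is driven from ox's KafkaFlow, per the earlier stack traces. A rough sketch of the subscribing side, assuming ox.kafka's documented ConsumerSettings/KafkaFlow API (the builder method names are assumptions based on that documentation, not taken from this log):

// Rough sketch, assuming ox.kafka's ConsumerSettings / KafkaFlow.subscribe
// as referenced in the traces above; group and topic names come from the log.
import ox.supervised
import ox.kafka.{ConsumerSettings, KafkaFlow}

@main def subscribeSketch(): Unit =
  supervised:
    val settings = ConsumerSettings
      .default("g5_1")                    // group.id seen in the log
      .bootstrapServers("localhost:6001") // embedded broker used by the test
    KafkaFlow
      .subscribe(settings, "t5_1")        // triggers the JoinGroup/SyncGroup dance above
      .take(3)
      .runForeach(println)
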
14:30:39.700 [virtual-692] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    (entries identical to the consumer config above, except client.id = consumer-g5_2-11 and group.id = g5_2)

14:30:39.700 [virtual-692] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:39.702 [virtual-692] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:39.702 [virtual-692] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:39.702 [virtual-692] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855039702
14:30:39.703 [virtual-699] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Subscribed to topic(s): t5_1
14:30:39.706 [virtual-699] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:39.706 [virtual-699] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:30:39.707 [virtual-699] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] (Re-)joining group
14:30:39.709 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_2 in Empty state. Created a new member id consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a and requesting the member to rejoin with this id.
14:30:39.710 [virtual-699] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Request joining group due to: need to re-join with the given member-id: consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a
14:30:39.710 [virtual-699] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] (Re-)joining group
14:30:39.710 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a joins group g5_2 in Empty state. Adding to the group now.
14:30:39.710 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a).
14:30:42.711 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_2 generation 1 with 1 members.
14:30:42.712 [virtual-699] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a', protocol='range'}
14:30:42.712 [virtual-699] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Finished assignment for group at generation 1: {consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a=Assignment(partitions=[t5_1-0])}
14:30:42.713 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a for group g5_2 for generation 1. The group has 1 members, 0 of which are static.
14:30:42.719 [virtual-699] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a', protocol='range'}
14:30:42.719 [virtual-699] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Notifying assignor about the new Assignment(partitions=[t5_1-0])
14:30:42.719 [virtual-699] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Adding newly assigned partitions: [t5_1-0]
14:30:42.720 [virtual-699] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Found no committed offset for partition t5_1-0
14:30:42.722 [virtual-699] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Resetting offset for partition t5_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
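
The reset to offset 0 above follows from auto.offset.reset = earliest in the dumped consumer config: group g5_2 has no committed offsets for t5_1-0, so it starts from the beginning of the partition rather than the end. The responsible setting, as a small plain-client sketch:

// Sketch: the consumer property behind the "Resetting offset for partition
// t5_1-0 to position FetchPosition{offset=0 ...}" line. With no committed
// offsets for the group, auto.offset.reset decides the starting position.
import java.util.Properties

@main def offsetResetSketch(): Unit =
  val props = new Properties()
  props.put("group.id", "g5_2")              // fresh group, no committed offsets
  props.put("auto.offset.reset", "earliest") // start from offset 0 (the client default is "latest")
  println(props)
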
14:30:42.725 [virtual-698] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    (stack trace identical to the 14:30:36.159 KafkaFlow$ error above)
14:30:42.725 [virtual-694] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    (stack trace identical to the 14:30:36.159 KafkaFlow$ error above)
14:30:42.725 [virtual-695] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    (stack trace identical to the 14:30:36.159 KafkaConsumerWrapper$ error above)
14:30:42.725 [virtual-699] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    (stack trace identical to the 14:30:36.159 KafkaConsumerWrapper$ error above)
14:30:42.726 [virtual-701] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Revoke previously assigned partitions [t5_1-0]
14:30:42.726 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Member consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:30:42.726 [virtual-702] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Revoke previously assigned partitions [t5_1-0]
14:30:42.726 [virtual-702] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Member consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:30:42.726 [virtual-702] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Resetting generation and member id due to: consumer pro-actively leaving the group
14:30:42.726 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
14:30:42.727 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
14:30:42.726 [virtual-702] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Request joining group due to: consumer pro-actively leaving the group
14:30:42.727 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_2] Member consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:30:42.727 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g5_2-11-607be29d-f3c5-4620-92d3-a5492a10973a) members.).
14:30:42.727 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_2 with generation 2 is now empty.
14:30:42.727 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:30:42.727 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g5_1-10-51771287-90e7-4325-ab5d-878448d0f5ab) members.).
14:30:42.727 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_1 with generation 4 is now empty.
14:30:43.211 [virtual-701] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:43.211 [virtual-701] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:43.212 [virtual-701] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:43.212 [virtual-701] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:43.213 [virtual-701] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-10 unregistered
14:30:43.226 [virtual-702] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:43.226 [virtual-702] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:43.226 [virtual-702] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:43.226 [virtual-702] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:43.228 [virtual-702] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_2-11 unregistered
14:30:43.230 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (entries identical to the producer-16 config above, except client.id = producer-17)

14:30:43.230 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:43.230 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-17] Instantiated an idempotent producer.
14:30:43.232 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:43.232 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:43.232 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855043232
14:30:43.234 [data-plane-kafka-request-handler-7] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t6_1) to the active controller.
14:30:43.236 [kafka-producer-network-thread | producer-17] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-17] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t6_1=UNKNOWN_TOPIC_OR_PARTITION}
14:30:43.236 [kafka-producer-network-thread | producer-17] INFO o.a.k.c.Metadata - [Producer clientId=producer-17] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:43.236 [kafka-producer-network-thread | producer-17] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-17] ProducerId set to 16 with epoch 0
14:30:43.237 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t6_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
14:30:43.238 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t6_1 with topic ID vahTgLqlSzqcf2w0gEjJ6g.
14:30:43.238 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t6_1-0 with topic ID vahTgLqlSzqcf2w0gEjJ6g and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
14:30:43.263 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
14:30:43.263 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t6_1-0)
14:30:43.263 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t6_1-0 with topic id vahTgLqlSzqcf2w0gEjJ6g.
14:30:43.266 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t6_1-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
14:30:43.267 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t6_1-0 in /tmp/kafka-logs7572554700115704093/t6_1-0 with properties {}
14:30:43.268 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t6_1-0 broker=0] No checkpointed highwatermark is found for partition t6_1-0
14:30:43.268 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t6_1-0 broker=0] Log loaded for partition t6_1-0 with initial high watermark 0
14:30:43.268 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t6_1-0 with topic id Some(vahTgLqlSzqcf2w0gEjJ6g) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
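
The sequence above is broker-side topic auto-creation: the producer's first metadata request for t6_1 returns the recoverable UNKNOWN_TOPIC_OR_PARTITION warning while the controller creates the topic, after which the send proceeds. A hypothetical minimal reproduction with the plain client:

// Hypothetical sketch: the first send to a not-yet-existing topic makes the
// producer fetch metadata, the broker auto-creates the topic (as logged by
// DefaultAutoTopicCreationManager above), and the initial metadata response
// carries the recoverable UNKNOWN_TOPIC_OR_PARTITION warning.
import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

@main def autoCreateSketch(): Unit =
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:6001")
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  val producer = new KafkaProducer[String, String](props)
  try producer.send(new ProducerRecord("t6_1", "k", "v")).get() // blocks until the topic exists and the write lands
  finally producer.close()
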
14:30:44.247 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-17] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:44.249 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:44.249 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:44.249 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:44.249 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:44.249 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-17 unregistered
14:30:44.249 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (entries identical to the producer-16 config above, except client.id = producer-18)

14:30:44.250 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:44.250 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-18] Instantiated an idempotent producer.
14:30:44.252 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:44.252 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:44.252 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855044252
14:30:44.256 [kafka-producer-network-thread | producer-18] INFO o.a.k.c.Metadata - [Producer clientId=producer-18] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:44.257 [kafka-producer-network-thread | producer-18] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-18] ProducerId set to 17 with epoch 0
14:30:44.266 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-18] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:44.267 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:44.268 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:44.268 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:44.268 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:44.269 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-18 unregistered
783414:30:44.269 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (ProducerConfig values identical to the preceding dump, except client.id = producer-19)

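The producers in this test are short-lived: each is created with the configuration dumped above, used briefly against topic t6_1, and closed again, which is why the instantiation/close pairs repeat below. For reference, a minimal sketch of how such a producer could be built with the plain Kafka Java client, mirroring the key settings from the dump (the helper name and record contents are hypothetical, not taken from the ox sources):

    import java.util.Properties
    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
    import org.apache.kafka.common.serialization.StringSerializer

    // Sketch only: a producer matching the ProducerConfig dumps above.
    def sendOne(): Unit =
      val props = new Properties()
      props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001") // the embedded test broker
      props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
      props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
      props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") // enable.idempotence = true
      props.put(ProducerConfig.LINGER_MS_CONFIG, "5")             // linger.ms = 5
      props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000")      // max.block.ms = 10000
      val producer = new KafkaProducer[String, String](props)
      try producer.send(new ProducerRecord("t6_1", "key", "value")).get() // block until acked
      finally producer.close() // triggers the "Closing the Kafka producer" lines seen here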
14:30:44.269 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:44.270 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-19] Instantiated an idempotent producer.
14:30:44.271 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:44.271 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:44.271 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855044271
14:30:44.274 [kafka-producer-network-thread | producer-19] INFO o.a.k.c.Metadata - [Producer clientId=producer-19] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:44.275 [kafka-producer-network-thread | producer-19] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-19] ProducerId set to 18 with epoch 0
14:30:44.283 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-19] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:44.285 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:44.285 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:44.285 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:44.285 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:44.285 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-19 unregistered
14:30:44.287 [virtual-708] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g6_1-12
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g6_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

14:30:44.287 [virtual-708] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:44.290 [virtual-708] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:44.290 [virtual-708] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:44.290 [virtual-708] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855044290
14:30:44.293 [virtual-709] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Subscribed to topic(s): t6_1
14:30:44.296 [virtual-709] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:44.298 [virtual-709] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:30:44.299 [virtual-709] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] (Re-)joining group
14:30:44.301 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_1 in Empty state. Created a new member id consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370 and requesting the member to rejoin with this id.
14:30:44.301 [virtual-709] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Request joining group due to: need to re-join with the given member-id: consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370
14:30:44.302 [virtual-709] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] (Re-)joining group
14:30:44.303 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370 joins group g6_1 in Empty state. Adding to the group now.
14:30:44.303 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370).
14:30:47.303 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_1 generation 1 with 1 members.
14:30:47.303 [virtual-709] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370', protocol='range'}
14:30:47.303 [virtual-709] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Finished assignment for group at generation 1: {consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370=Assignment(partitions=[t6_1-0])}
14:30:47.304 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370 for group g6_1 for generation 1. The group has 1 members, 0 of which are static.
14:30:47.310 [virtual-709] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370', protocol='range'}
14:30:47.310 [virtual-709] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Notifying assignor about the new Assignment(partitions=[t6_1-0])
14:30:47.310 [virtual-709] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Adding newly assigned partitions: [t6_1-0]
14:30:47.311 [virtual-709] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Found no committed offset for partition t6_1-0
14:30:47.314 [virtual-709] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Resetting offset for partition t6_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
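The lines above are the classic consumer-group join protocol in action: the first JoinGroup is answered with a newly generated member id and a request to rejoin, the coordinator then rebalances and stabilizes generation 1, the group leader computes a range assignment for t6_1-0, and after the SyncGroup round the consumer seeks. Because group g6_1 has no committed offset yet, auto.offset.reset = earliest resets the position to offset 0. A minimal sketch of a consumer matching the ConsumerConfig dump above, using the plain Kafka client (hypothetical code, not from the ox sources):

    import java.time.Duration
    import java.util.Properties
    import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
    import org.apache.kafka.common.serialization.StringDeserializer

    // Sketch only: subscribing triggers the join/rebalance sequence logged above.
    def pollOnce(): Unit =
      val props = new Properties()
      props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
      props.put(ConsumerConfig.GROUP_ID_CONFIG, "g6_1")
      props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // seek to 0 when no committed offset
      props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")   // offsets are committed explicitly
      props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
      props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
      val consumer = new KafkaConsumer[String, String](props)
      try
        consumer.subscribe(java.util.List.of("t6_1"))
        consumer.poll(Duration.ofMillis(500)).forEach(r => println(s"${r.offset}: ${r.value}"))
      finally consumer.close() // sends the LeaveGroup request seen further down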
14:30:49.317 [virtual-709] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:30:49.317 [virtual-711] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
    at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
    at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
    at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
    at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
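These InterruptException/InterruptedException traces are almost certainly shutdown noise rather than a test failure: judging by the stack frames (forkError, ThreadHerd, VirtualThread.run), ox interrupts the virtual thread that is blocked in KafkaConsumer.poll when the test's concurrency scope ends, and the Kafka client surfaces the interrupt as org.apache.kafka.common.errors.InterruptException, which the wrapper logs just before the consumer is closed and leaves the group. With the plain client, the analogous cooperative way to stop a blocked poll is KafkaConsumer.wakeup(); a minimal sketch (hypothetical, not the ox mechanism):

    import java.time.Duration
    import org.apache.kafka.clients.consumer.KafkaConsumer
    import org.apache.kafka.common.errors.WakeupException

    // Sketch only: wakeup() is the one KafkaConsumer method that is safe to
    // call from another thread; it makes a blocked poll() throw WakeupException.
    def pollLoop(consumer: KafkaConsumer[String, String]): Unit =
      try
        while true do
          consumer.poll(Duration.ofMillis(500)).forEach(r => println(r.value))
      catch
        case _: WakeupException => () // expected during shutdown, not an error
      finally
        consumer.close() // leave the group cleanly

    // from another thread, to request shutdown: consumer.wakeup()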
14:30:49.318 [virtual-716] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Revoke previously assigned partitions [t6_1-0]
14:30:49.318 [virtual-716] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Member consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:30:49.318 [virtual-716] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Resetting generation and member id due to: consumer pro-actively leaving the group
14:30:49.318 [virtual-716] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Request joining group due to: consumer pro-actively leaving the group
14:30:49.318 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_1] Member consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:30:49.318 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g6_1-12-c9ba0da9-5eef-4368-b33a-5d9854bad370) members.).
14:30:49.318 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_1 with generation 2 is now empty.
14:30:49.325 [virtual-716] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:49.325 [virtual-716] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:49.325 [virtual-716] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:49.325 [virtual-716] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:49.327 [virtual-716] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_1-12 unregistered
14:30:49.327 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    (ProducerConfig values identical to the preceding dumps, except client.id = producer-20)

14:30:49.328 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:49.328 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-20] Instantiated an idempotent producer.
14:30:49.329 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:49.329 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:49.329 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855049329
14:30:49.332 [kafka-producer-network-thread | producer-20] INFO o.a.k.c.Metadata - [Producer clientId=producer-20] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:49.332 [kafka-producer-network-thread | producer-20] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-20] ProducerId set to 19 with epoch 0
14:30:49.340 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-20] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:49.341 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:49.342 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:49.342 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:49.342 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:49.342 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-20 unregistered
14:30:49.343 [virtual-718] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    (ConsumerConfig values identical to the consumer-g6_1-12 dump above, except client.id = consumer-g6_1-13)

14:30:49.343 [virtual-718] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:49.345 [virtual-718] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:49.345 [virtual-718] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:49.345 [virtual-718] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855049345
14:30:49.346 [virtual-721] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Subscribed to topic(s): t6_1
14:30:49.348 [virtual-721] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:49.349 [virtual-721] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:30:49.349 [virtual-721] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] (Re-)joining group
14:30:49.351 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_1 in Empty state. Created a new member id consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9 and requesting the member to rejoin with this id.
14:30:49.351 [virtual-721] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Request joining group due to: need to re-join with the given member-id: consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9
14:30:49.351 [virtual-721] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] (Re-)joining group
14:30:49.352 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9 joins group g6_1 in Empty state. Adding to the group now.
14:30:49.352 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9).
14:30:51.995 [quorum-controller-0-event-handler] INFO o.a.k.c.EventPerformanceMonitor - [QuorumController id=0] In the last 60000 ms period, 351 controller events were completed, which took an average of 10.08 ms each. The slowest event was createTopics(1767850661), which took 41.47 ms.
14:30:52.353 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_1 generation 3 with 1 members.
14:30:52.353 [virtual-721] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9', protocol='range'}
14:30:52.353 [virtual-721] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Finished assignment for group at generation 3: {consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9=Assignment(partitions=[t6_1-0])}
14:30:52.354 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9 for group g6_1 for generation 3. The group has 1 members, 0 of which are static.
14:30:52.360 [virtual-721] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9', protocol='range'}
14:30:52.360 [virtual-721] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Notifying assignor about the new Assignment(partitions=[t6_1-0])
14:30:52.361 [virtual-721] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Adding newly assigned partitions: [t6_1-0]
14:30:52.362 [virtual-721] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t6_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
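Unlike the first subscription, this consumer finds a committed offset for t6_1-0 and resumes from position 3 instead of resetting to earliest; with enable.auto.commit = false, that offset must have been committed explicitly after the earlier records were processed. A minimal sketch of such a manual commit with the plain client (hypothetical helper, not from the ox sources):

    import org.apache.kafka.clients.consumer.{KafkaConsumer, OffsetAndMetadata}
    import org.apache.kafka.common.TopicPartition

    // Sketch only: the committed offset is the *next* offset to consume,
    // i.e. 3 after processing offsets 0..2 of t6_1-0.
    def commitPosition(consumer: KafkaConsumer[String, String]): Unit =
      consumer.commitSync(
        java.util.Map.of(new TopicPartition("t6_1", 0), new OffsetAndMetadata(3L))
      ) // blocks until the group coordinator acknowledges the commit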
14:30:52.365 [virtual-718] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    (ConsumerConfig values identical to the consumer-g6_1-12 dump above, except client.id = consumer-g6_2-14 and group.id = g6_2)

14:30:52.366 [virtual-718] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:52.368 [virtual-718] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:52.368 [virtual-718] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:52.368 [virtual-718] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855052368
14:30:52.369 [virtual-725] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Subscribed to topic(s): t6_1
14:30:52.371 [virtual-725] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:52.372 [virtual-725] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:30:52.373 [virtual-725] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] (Re-)joining group
14:30:52.375 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_2 in Empty state. Created a new member id consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66 and requesting the member to rejoin with this id.
14:30:52.375 [virtual-725] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Request joining group due to: need to re-join with the given member-id: consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66
14:30:52.375 [virtual-725] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] (Re-)joining group
14:30:52.376 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66 joins group g6_2 in Empty state. Adding to the group now.
14:30:52.376 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66).
14:30:55.376 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_2 generation 1 with 1 members.
14:30:55.377 [virtual-725] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66', protocol='range'}
14:30:55.377 [virtual-725] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Finished assignment for group at generation 1: {consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66=Assignment(partitions=[t6_1-0])}
14:30:55.378 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66 for group g6_2 for generation 1. The group has 1 members, 0 of which are static.
14:30:55.384 [virtual-725] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66', protocol='range'}
14:30:55.384 [virtual-725] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Notifying assignor about the new Assignment(partitions=[t6_1-0])
14:30:55.384 [virtual-725] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Adding newly assigned partitions: [t6_1-0]
14:30:55.385 [virtual-725] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Found no committed offset for partition t6_1-0
14:30:55.387 [virtual-725] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Resetting offset for partition t6_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
14:30:55.389 [virtual-724] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:30:55.389 [virtual-721] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
    (stack trace identical to the [virtual-709] KafkaConsumerWrapper$ trace at 14:30:49.317 above)
14:30:55.390 [virtual-725] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
    (stack trace identical to the [virtual-709] KafkaConsumerWrapper$ trace at 14:30:49.317 above)
14:30:55.389 [virtual-720] ERROR o.k.KafkaFlow$ - Exception when polling for records
    (stack trace identical to the [virtual-724] KafkaFlow$ trace above)
14:30:55.390 [virtual-727] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Revoke previously assigned partitions [t6_1-0]
14:30:55.391 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Member consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:30:55.391 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Resetting generation and member id due to: consumer pro-actively leaving the group
14:30:55.391 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Request joining group due to: consumer pro-actively leaving the group
14:30:55.391 [virtual-728] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Revoke previously assigned partitions [t6_1-0]
14:30:55.391 [virtual-728] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Member consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:30:55.391 [virtual-728] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Resetting generation and member id due to: consumer pro-actively leaving the group
14:30:55.391 [virtual-728] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Request joining group due to: consumer pro-actively leaving the group
14:30:55.393 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_1] Member consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:30:55.393 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g6_1-13-27e637b2-2a8a-40df-bc35-e821b3db62d9) members.).
14:30:55.393 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_1 with generation 4 is now empty.
14:30:55.393 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_2] Member consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:30:55.393 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g6_2-14-e2278066-f4dc-4ffb-b2b9-ae192f8c3c66) members.).
14:30:55.393 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_2 with generation 2 is now empty.
14:30:55.877 [virtual-727] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:55.877 [virtual-727] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:55.877 [virtual-727] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:55.877 [virtual-727] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:55.879 [virtual-727] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_1-13 unregistered
14:30:55.891 [virtual-728] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:55.891 [virtual-728] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:55.891 [virtual-728] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:55.891 [virtual-728] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:55.892 [virtual-728] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_2-14 unregistered
14:30:55.894 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-21
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

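Most of the values in the dump above are stock Kafka 4.x client defaults; the test effectively overrides only the bootstrap address, the String serializers, and a few timeouts (max.block.ms, retry.backoff.ms, linger.ms). A minimal sketch of a producer built with just those settings against the same embedded broker (standalone, not the project's test code):

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

@main def producerSketch(): Unit =
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:6001")
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("max.block.ms", "10000")   // fail fast if the broker is unreachable
  props.put("retry.backoff.ms", "1000")
  props.put("linger.ms", "5")
  val producer = new KafkaProducer[String, String](props)
  try producer.send(new ProducerRecord("t7_1", "key", "value")).get() // block until acked
  finally producer.close()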
14:30:55.895 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:55.895 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-21] Instantiated an idempotent producer.
14:30:55.897 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:55.897 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:55.897 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855055897
14:30:55.899 [data-plane-kafka-request-handler-2] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t7_1) to the active controller.
14:30:55.900 [kafka-producer-network-thread | producer-21] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-21] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t7_1=UNKNOWN_TOPIC_OR_PARTITION}
14:30:55.900 [kafka-producer-network-thread | producer-21] INFO o.a.k.c.Metadata - [Producer clientId=producer-21] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:55.900 [kafka-producer-network-thread | producer-21] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-21] ProducerId set to 20 with epoch 0
14:30:55.901 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t7_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
14:30:55.901 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t7_1 with topic ID x14eQyvoQz6VY5Mi8PZ1rg.
14:30:55.901 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t7_1-0 with topic ID x14eQyvoQz6VY5Mi8PZ1rg and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
14:30:55.927 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
14:30:55.928 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t7_1-0)
14:30:55.928 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t7_1-0 with topic id x14eQyvoQz6VY5Mi8PZ1rg.
14:30:55.930 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t7_1-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
14:30:55.931 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t7_1-0 in /tmp/kafka-logs7572554700115704093/t7_1-0 with properties {}
14:30:55.931 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t7_1-0 broker=0] No checkpointed highwatermark is found for partition t7_1-0
14:30:55.931 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t7_1-0 broker=0] Log loaded for partition t7_1-0 with initial high watermark 0
14:30:55.932 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t7_1-0 with topic id Some(x14eQyvoQz6VY5Mi8PZ1rg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
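The UNKNOWN_TOPIC_OR_PARTITION warning above is expected noise: the producer's first metadata request is what triggers broker-side auto-creation of t7_1, and the send simply retries once the partition has a leader. Pre-creating the topic would avoid the warning; a minimal sketch using the admin client (standalone, not part of this build):

import java.util.Properties
import org.apache.kafka.clients.admin.{Admin, NewTopic}

@main def createTopicSketch(): Unit =
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:6001")
  val admin = Admin.create(props)
  // one partition, replication factor 1, matching the embedded single-broker setup
  try admin.createTopics(java.util.List.of(new NewTopic("t7_1", 1, 1.toShort))).all().get()
  finally admin.close()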
14:30:56.911 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-21] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:56.912 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:56.912 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:56.912 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:56.912 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:56.912 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-21 unregistered
14:30:56.913 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	(config dump identical to the producer-21 dump above, except client.id = producer-22)
14:30:56.913 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:56.913 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-22] Instantiated an idempotent producer.
14:30:56.914 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:56.914 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:56.915 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855056914
14:30:56.917 [kafka-producer-network-thread | producer-22] INFO o.a.k.c.Metadata - [Producer clientId=producer-22] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:56.917 [kafka-producer-network-thread | producer-22] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-22] ProducerId set to 21 with epoch 0
14:30:56.925 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-22] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:56.926 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:56.926 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:56.926 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:56.926 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:56.927 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-22 unregistered
14:30:56.927 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	(config dump identical to the producer-21 dump above, except client.id = producer-23)
14:30:56.927 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:56.928 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-23] Instantiated an idempotent producer.
14:30:56.930 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:56.930 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:56.930 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855056930
14:30:56.932 [kafka-producer-network-thread | producer-23] INFO o.a.k.c.Metadata - [Producer clientId=producer-23] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:56.932 [kafka-producer-network-thread | producer-23] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-23] ProducerId set to 22 with epoch 0
14:30:56.940 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-23] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:30:56.941 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:30:56.941 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:30:56.941 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:30:56.941 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:30:56.941 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-23 unregistered
14:30:56.943 [virtual-734] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g7_1-15
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g7_1
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

14:30:56.943 [virtual-734] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:30:56.945 [virtual-734] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:30:56.945 [virtual-734] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:30:56.945 [virtual-734] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855056945
14:30:56.946 [virtual-735] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Subscribed to topic(s): t7_1
14:30:56.948 [virtual-735] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:30:56.949 [virtual-735] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:30:56.949 [virtual-735] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] (Re-)joining group
14:30:56.951 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_1 in Empty state. Created a new member id consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70 and requesting the member to rejoin with this id.
14:30:56.951 [virtual-735] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Request joining group due to: need to re-join with the given member-id: consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70
14:30:56.951 [virtual-735] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] (Re-)joining group
14:30:56.952 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70 joins group g7_1 in Empty state. Adding to the group now.
14:30:56.952 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70 with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70).
14:30:59.952 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_1 generation 1 with 1 members.
14:30:59.952 [virtual-735] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70', protocol='range'}
14:30:59.952 [virtual-735] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Finished assignment for group at generation 1: {consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70=Assignment(partitions=[t7_1-0])}
14:30:59.953 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70 for group g7_1 for generation 1. The group has 1 members, 0 of which are static.
14:30:59.959 [virtual-735] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70', protocol='range'}
14:30:59.959 [virtual-735] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Notifying assignor about the new Assignment(partitions=[t7_1-0])
14:30:59.959 [virtual-735] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Adding newly assigned partitions: [t7_1-0]
14:30:59.960 [virtual-735] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Found no committed offset for partition t7_1-0
14:30:59.962 [virtual-735] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Resetting offset for partition t7_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
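The sequence above is the classic consumer-group protocol end to end: discover the coordinator, join twice (the first attempt only obtains a member id), sync to receive the partition assignment, and reset to offset 0 because auto.offset.reset = earliest and g7_1 has no committed offset yet. A minimal standalone sketch of a consumer configured like the dump above:

import java.time.Duration
import java.util.Properties
import scala.jdk.CollectionConverters.*
import org.apache.kafka.clients.consumer.KafkaConsumer

@main def consumerSketch(): Unit =
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:6001")
  props.put("group.id", "g7_1")
  props.put("auto.offset.reset", "earliest") // start from the beginning when nothing is committed
  props.put("enable.auto.commit", "false")
  props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
  props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
  val consumer = new KafkaConsumer[String, String](props)
  try
    consumer.subscribe(java.util.List.of("t7_1")) // triggers the join/sync dance logged above
    consumer.poll(Duration.ofSeconds(1)).asScala.foreach(r => println(s"${r.offset}: ${r.key} -> ${r.value}"))
  finally consumer.close() // sends the LeaveGroup request on shutdown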
14:31:01.965 [virtual-737] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
	at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
	at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
	at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
	at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
	at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
	at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:31:01.965 [virtual-735] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:31:01.966 [virtual-742] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Revoke previously assigned partitions [t7_1-0]
14:31:01.966 [virtual-742] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Member consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:31:01.966 [virtual-742] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Resetting generation and member id due to: consumer pro-actively leaving the group
14:31:01.966 [virtual-742] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Request joining group due to: consumer pro-actively leaving the group
14:31:01.966 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_1] Member consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:31:01.966 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g7_1-15-405fe5fe-8b1d-4fa0-954e-7982e7c22a70) members.).
14:31:01.967 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_1 with generation 2 is now empty.
14:31:01.973 [virtual-742] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:31:01.973 [virtual-742] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:31:01.973 [virtual-742] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:31:01.974 [virtual-742] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:31:01.975 [virtual-742] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_1-15 unregistered
14:31:01.976 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	(config dump identical to the producer-21 dump above, except client.id = producer-24)
14:31:01.976 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:31:01.976 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-24] Instantiated an idempotent producer.
14:31:01.978 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:31:01.978 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:31:01.978 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855061978
14:31:01.981 [kafka-producer-network-thread | producer-24] INFO o.a.k.c.Metadata - [Producer clientId=producer-24] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:31:01.981 [kafka-producer-network-thread | producer-24] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-24] ProducerId set to 23 with epoch 0
14:31:01.989 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-24] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:31:01.991 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:31:01.991 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:31:01.991 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:31:01.991 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:31:01.991 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-24 unregistered
14:31:01.992 [virtual-744] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	(config dump identical to the consumer-g7_1-15 dump above, except client.id = consumer-g7_1-16)
14:31:01.993 [virtual-744] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:31:01.996 [virtual-744] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:31:01.996 [virtual-744] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:31:01.996 [virtual-744] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855061996
14:31:01.997 [virtual-747] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Subscribed to topic(s): t7_1
14:31:02.000 [virtual-747] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:31:02.000 [virtual-747] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:31:02.001 [virtual-747] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] (Re-)joining group
14:31:02.003 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_1 in Empty state. Created a new member id consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9 and requesting the member to rejoin with this id.
14:31:02.003 [virtual-747] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Request joining group due to: need to re-join with the given member-id: consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9
14:31:02.003 [virtual-747] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] (Re-)joining group
14:31:02.004 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9 joins group g7_1 in Empty state. Adding to the group now.
14:31:02.004 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9 with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9).
14:31:05.004 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_1 generation 3 with 1 members.
14:31:05.004 [virtual-747] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9', protocol='range'}
14:31:05.005 [virtual-747] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Finished assignment for group at generation 3: {consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9=Assignment(partitions=[t7_1-0])}
14:31:05.005 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9 for group g7_1 for generation 3. The group has 1 members, 0 of which are static.
14:31:05.011 [virtual-747] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9', protocol='range'}
14:31:05.012 [virtual-747] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Notifying assignor about the new Assignment(partitions=[t7_1-0])
14:31:05.012 [virtual-747] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Adding newly assigned partitions: [t7_1-0]
14:31:05.013 [virtual-747] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t7_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
951514:31:05.016 [virtual-744] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
9516 allow.auto.create.topics = true
9517 auto.commit.interval.ms = 5000
9518 auto.offset.reset = earliest
9519 bootstrap.servers = [localhost:6001]
9520 check.crcs = true
9521 client.dns.lookup = use_all_dns_ips
9522 client.id = consumer-g7_2-17
9523 client.rack =
9524 connections.max.idle.ms = 540000
9525 default.api.timeout.ms = 60000
9526 enable.auto.commit = false
9527 enable.metrics.push = true
9528 exclude.internal.topics = true
9529 fetch.max.bytes = 52428800
9530 fetch.max.wait.ms = 500
9531 fetch.min.bytes = 1
9532 group.id = g7_2
9533 group.instance.id = null
9534 group.protocol = classic
9535 group.remote.assignor = null
9536 heartbeat.interval.ms = 3000
9537 interceptor.classes = []
9538 internal.leave.group.on.close = true
9539 internal.throw.on.fetch.stable.offset.unsupported = false
9540 isolation.level = read_uncommitted
9541 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
9542 max.partition.fetch.bytes = 1048576
9543 max.poll.interval.ms = 300000
9544 max.poll.records = 500
9545 metadata.max.age.ms = 300000
9546 metadata.recovery.rebootstrap.trigger.ms = 300000
9547 metadata.recovery.strategy = rebootstrap
9548 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
9549 metrics.num.samples = 2
9550 metrics.recording.level = INFO
9551 metrics.sample.window.ms = 30000
9552 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
9553 receive.buffer.bytes = 65536
9554 reconnect.backoff.max.ms = 1000
9555 reconnect.backoff.ms = 50
9556 request.timeout.ms = 30000
9557 retry.backoff.max.ms = 1000
9558 retry.backoff.ms = 100
9559 sasl.client.callback.handler.class = null
9560 sasl.jaas.config = null
9561 sasl.kerberos.kinit.cmd = /usr/bin/kinit
9562 sasl.kerberos.min.time.before.relogin = 60000
9563 sasl.kerberos.service.name = null
9564 sasl.kerberos.ticket.renew.jitter = 0.05
9565 sasl.kerberos.ticket.renew.window.factor = 0.8
9566 sasl.login.callback.handler.class = null
9567 sasl.login.class = null
9568 sasl.login.connect.timeout.ms = null
9569 sasl.login.read.timeout.ms = null
9570 sasl.login.refresh.buffer.seconds = 300
9571 sasl.login.refresh.min.period.seconds = 60
9572 sasl.login.refresh.window.factor = 0.8
9573 sasl.login.refresh.window.jitter = 0.05
9574 sasl.login.retry.backoff.max.ms = 10000
9575 sasl.login.retry.backoff.ms = 100
9576 sasl.mechanism = GSSAPI
9577 sasl.oauthbearer.assertion.algorithm = RS256
9578 sasl.oauthbearer.assertion.claim.aud = null
9579 sasl.oauthbearer.assertion.claim.exp.seconds = 300
9580 sasl.oauthbearer.assertion.claim.iss = null
9581 sasl.oauthbearer.assertion.claim.jti.include = false
9582 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
9583 sasl.oauthbearer.assertion.claim.sub = null
9584 sasl.oauthbearer.assertion.file = null
9585 sasl.oauthbearer.assertion.private.key.file = null
9586 sasl.oauthbearer.assertion.private.key.passphrase = null
9587 sasl.oauthbearer.assertion.template.file = null
9588 sasl.oauthbearer.client.credentials.client.id = null
9589 sasl.oauthbearer.client.credentials.client.secret = null
9590 sasl.oauthbearer.clock.skew.seconds = 30
9591 sasl.oauthbearer.expected.audience = null
9592 sasl.oauthbearer.expected.issuer = null
9593 sasl.oauthbearer.header.urlencode = false
9594 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
9595 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
9596 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
9597 sasl.oauthbearer.jwks.endpoint.url = null
9598 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
9599 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
9600 sasl.oauthbearer.scope = null
9601 sasl.oauthbearer.scope.claim.name = scope
9602 sasl.oauthbearer.sub.claim.name = sub
9603 sasl.oauthbearer.token.endpoint.url = null
9604 security.protocol = PLAINTEXT
9605 security.providers = null
9606 send.buffer.bytes = 131072
9607 session.timeout.ms = 45000
9608 share.acknowledgement.mode = implicit
9609 socket.connection.setup.timeout.max.ms = 30000
9610 socket.connection.setup.timeout.ms = 10000
9611 ssl.cipher.suites = null
9612 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
9613 ssl.endpoint.identification.algorithm = https
9614 ssl.engine.factory.class = null
9615 ssl.key.password = null
9616 ssl.keymanager.algorithm = SunX509
9617 ssl.keystore.certificate.chain = null
9618 ssl.keystore.key = null
9619 ssl.keystore.location = null
9620 ssl.keystore.password = null
9621 ssl.keystore.type = JKS
9622 ssl.protocol = TLSv1.3
9623 ssl.provider = null
9624 ssl.secure.random.implementation = null
9625 ssl.trustmanager.algorithm = PKIX
9626 ssl.truststore.certificates = null
9627 ssl.truststore.location = null
9628 ssl.truststore.password = null
9629 ssl.truststore.type = JKS
9630 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
9631
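Only a handful of values in the dump above deviate from the kafka-clients defaults: the group id, the bootstrap address, auto.offset.reset = earliest and enable.auto.commit = false (offsets committed manually). As a rough, non-authoritative sketch, the equivalent consumer settings with ox-kafka's builder (names per the library's documented ConsumerSettings API; literal values read off the dump) would look like:

    import ox.kafka.ConsumerSettings
    import ox.kafka.ConsumerSettings.AutoOffsetReset

    // Hypothetical reconstruction of the settings behind the dump above
    val settings = ConsumerSettings
      .default("g7_2")                           // group.id = g7_2
      .bootstrapServers("localhost:6001")        // bootstrap.servers = [localhost:6001]
      .autoOffsetReset(AutoOffsetReset.Earliest) // auto.offset.reset = earliest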
14:31:05.016 [virtual-744] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:31:05.018 [virtual-744] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:31:05.018 [virtual-744] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:31:05.018 [virtual-744] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855065018
14:31:05.018 [virtual-751] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Subscribed to topic(s): t7_1
14:31:05.022 [virtual-751] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:31:05.023 [virtual-751] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:31:05.023 [virtual-751] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] (Re-)joining group
14:31:05.025 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_2 in Empty state. Created a new member id consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f and requesting the member to rejoin with this id.
14:31:05.026 [virtual-751] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Request joining group due to: need to re-join with the given member-id: consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f
14:31:05.026 [virtual-751] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] (Re-)joining group
14:31:05.027 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f joins group g7_2 in Empty state. Adding to the group now.
14:31:05.027 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f).
14:31:08.027 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_2 generation 1 with 1 members.
14:31:08.027 [virtual-751] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f', protocol='range'}
14:31:08.027 [virtual-751] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Finished assignment for group at generation 1: {consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f=Assignment(partitions=[t7_1-0])}
14:31:08.028 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f for group g7_2 for generation 1. The group has 1 members, 0 of which are static.
14:31:08.035 [virtual-751] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f', protocol='range'}
14:31:08.035 [virtual-751] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Notifying assignor about the new Assignment(partitions=[t7_1-0])
14:31:08.035 [virtual-751] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Adding newly assigned partitions: [t7_1-0]
14:31:08.036 [virtual-751] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Found no committed offset for partition t7_1-0
14:31:08.038 [virtual-751] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Resetting offset for partition t7_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
14:31:08.040 [virtual-750] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:31:08.040 [virtual-751] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:31:08.041 [virtual-746] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:31:08.041 [virtual-747] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
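The four ERROR traces above are a shutdown pattern rather than a broker failure: when an ox structured-concurrency scope ends, forks still blocked in KafkaConsumer.poll are interrupted, and the client surfaces that as InterruptException wrapping InterruptedException. A minimal sketch of the mechanism, using only ox's core supervised/fork API (the poll loop body is an illustrative stand-in, not the test's code):

    import ox.{fork, supervised}

    // A daemon fork blocks in a poll-style call; when the supervised
    // scope's main body completes, ox interrupts the still-running fork.
    @main def interruptDemo(): Unit =
      supervised {
        fork {
          try while true do Thread.sleep(100) // stand-in for consumer.poll(...)
          catch case _: InterruptedException => () // expected during scope shutdown
        }
        // main body returns here -> scope closes -> fork is interrupted
      }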
14:31:08.042 [virtual-753] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Revoke previously assigned partitions [t7_1-0]
14:31:08.042 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Member consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:31:08.042 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Resetting generation and member id due to: consumer pro-actively leaving the group
14:31:08.042 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Request joining group due to: consumer pro-actively leaving the group
14:31:08.043 [virtual-754] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Revoke previously assigned partitions [t7_1-0]
14:31:08.043 [virtual-754] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Member consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:31:08.043 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_2] Member consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:31:08.043 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g7_2-17-3371ee22-76d4-481d-b1b9-f2ae3f89367f) members.).
14:31:08.043 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_2 with generation 2 is now empty.
14:31:08.044 [virtual-754] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Resetting generation and member id due to: consumer pro-actively leaving the group
14:31:08.044 [virtual-754] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Request joining group due to: consumer pro-actively leaving the group
14:31:08.044 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_1] Member consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:31:08.044 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g7_1-16-b8ae57c7-8e6c-4497-922d-4d57bb29bdd9) members.).
14:31:08.044 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_1 with generation 4 is now empty.
14:31:08.526 [virtual-754] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:31:08.526 [virtual-754] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:31:08.526 [virtual-754] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:31:08.526 [virtual-754] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:31:08.528 [virtual-754] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_1-16 unregistered
14:31:08.542 [virtual-753] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:31:08.542 [virtual-753] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:31:08.542 [virtual-753] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:31:08.542 [virtual-753] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:31:08.543 [virtual-753] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_2-17 unregistered
14:31:08.545 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-25
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

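The producer dumps in this test run are likewise near-default: idempotence and acks = -1 are the kafka-clients 4.x defaults, so only the bootstrap address, the String serializers, linger.ms = 5 and max.block.ms = 10000 stand out. A minimal sketch with the plain Java client API, which ox's kafka module builds on (topic name and payload are illustrative, not taken from the test):

    import java.util.Properties
    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
    import org.apache.kafka.common.serialization.StringSerializer

    // Hypothetical reconstruction of the producer behind the dump above
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    props.put(ProducerConfig.LINGER_MS_CONFIG, "5")        // batch sends for up to 5 ms
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000") // fail fast if the broker is unreachable

    val producer = new KafkaProducer[String, String](props)
    try producer.send(new ProducerRecord("t8_1", "key", "value")).get()
    finally producer.close()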
986814:31:08.545 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
986914:31:08.546 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-25] Instantiated an idempotent producer.
987014:31:08.547 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
987114:31:08.547 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
987214:31:08.547 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855068547
987314:31:08.549 [data-plane-kafka-request-handler-3] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t8_1) to the active controller.
987414:31:08.550 [kafka-producer-network-thread | producer-25] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-25] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t8_1=UNKNOWN_TOPIC_OR_PARTITION}
987514:31:08.550 [kafka-producer-network-thread | producer-25] INFO o.a.k.c.Metadata - [Producer clientId=producer-25] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
987614:31:08.550 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t8_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
987714:31:08.550 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t8_1 with topic ID O922ajB_S62rbtcrcEPDMg.
987814:31:08.551 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t8_1-0 with topic ID O922ajB_S62rbtcrcEPDMg and PartitionRegistration(replicas=[0], directories=[wZSvsjOHZk681DfKN9_ltw], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
987914:31:08.552 [kafka-producer-network-thread | producer-25] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-25] ProducerId set to 24 with epoch 0
988014:31:08.576 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
988114:31:08.576 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t8_1-0)
988214:31:08.576 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t8_1-0 with topic id O922ajB_S62rbtcrcEPDMg.
988314:31:08.578 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t8_1-0, dir=/tmp/kafka-logs7572554700115704093] Loading producer state till offset 0
988414:31:08.579 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t8_1-0 in /tmp/kafka-logs7572554700115704093/t8_1-0 with properties {}
988514:31:08.579 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t8_1-0 broker=0] No checkpointed highwatermark is found for partition t8_1-0
988614:31:08.579 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t8_1-0 broker=0] Log loaded for partition t8_1-0 with initial high watermark 0
988714:31:08.579 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t8_1-0 with topic id Some(O922ajB_S62rbtcrcEPDMg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
988814:31:09.561 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-25] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
988914:31:09.563 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
989014:31:09.563 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
989114:31:09.563 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
989214:31:09.563 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
989314:31:09.563 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-25 unregistered
989414:31:09.564 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
9895 acks = -1
9896 batch.size = 16384
9897 bootstrap.servers = [localhost:6001]
9898 buffer.memory = 33554432
9899 client.dns.lookup = use_all_dns_ips
9900 client.id = producer-26
9901 compression.gzip.level = -1
9902 compression.lz4.level = 9
9903 compression.type = none
9904 compression.zstd.level = 3
9905 connections.max.idle.ms = 540000
9906 delivery.timeout.ms = 120000
9907 enable.idempotence = true
9908 enable.metrics.push = true
9909 interceptor.classes = []
9910 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
9911 linger.ms = 5
9912 max.block.ms = 10000
9913 max.in.flight.requests.per.connection = 5
9914 max.request.size = 1048576
9915 metadata.max.age.ms = 300000
9916 metadata.max.idle.ms = 300000
9917 metadata.recovery.rebootstrap.trigger.ms = 300000
9918 metadata.recovery.strategy = rebootstrap
9919 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
9920 metrics.num.samples = 2
9921 metrics.recording.level = INFO
9922 metrics.sample.window.ms = 30000
9923 partitioner.adaptive.partitioning.enable = true
9924 partitioner.availability.timeout.ms = 0
9925 partitioner.class = null
9926 partitioner.ignore.keys = false
9927 receive.buffer.bytes = 32768
9928 reconnect.backoff.max.ms = 1000
9929 reconnect.backoff.ms = 50
9930 request.timeout.ms = 30000
9931 retries = 2147483647
9932 retry.backoff.max.ms = 1000
9933 retry.backoff.ms = 1000
9934 sasl.client.callback.handler.class = null
9935 sasl.jaas.config = null
9936 sasl.kerberos.kinit.cmd = /usr/bin/kinit
9937 sasl.kerberos.min.time.before.relogin = 60000
9938 sasl.kerberos.service.name = null
9939 sasl.kerberos.ticket.renew.jitter = 0.05
9940 sasl.kerberos.ticket.renew.window.factor = 0.8
9941 sasl.login.callback.handler.class = null
9942 sasl.login.class = null
9943 sasl.login.connect.timeout.ms = null
9944 sasl.login.read.timeout.ms = null
9945 sasl.login.refresh.buffer.seconds = 300
9946 sasl.login.refresh.min.period.seconds = 60
9947 sasl.login.refresh.window.factor = 0.8
9948 sasl.login.refresh.window.jitter = 0.05
9949 sasl.login.retry.backoff.max.ms = 10000
9950 sasl.login.retry.backoff.ms = 100
9951 sasl.mechanism = GSSAPI
9952 sasl.oauthbearer.assertion.algorithm = RS256
9953 sasl.oauthbearer.assertion.claim.aud = null
9954 sasl.oauthbearer.assertion.claim.exp.seconds = 300
9955 sasl.oauthbearer.assertion.claim.iss = null
9956 sasl.oauthbearer.assertion.claim.jti.include = false
9957 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
9958 sasl.oauthbearer.assertion.claim.sub = null
9959 sasl.oauthbearer.assertion.file = null
9960 sasl.oauthbearer.assertion.private.key.file = null
9961 sasl.oauthbearer.assertion.private.key.passphrase = null
9962 sasl.oauthbearer.assertion.template.file = null
9963 sasl.oauthbearer.client.credentials.client.id = null
9964 sasl.oauthbearer.client.credentials.client.secret = null
9965 sasl.oauthbearer.clock.skew.seconds = 30
9966 sasl.oauthbearer.expected.audience = null
9967 sasl.oauthbearer.expected.issuer = null
9968 sasl.oauthbearer.header.urlencode = false
9969 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
9970 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
9971 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
9972 sasl.oauthbearer.jwks.endpoint.url = null
9973 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
9974 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
9975 sasl.oauthbearer.scope = null
9976 sasl.oauthbearer.scope.claim.name = scope
9977 sasl.oauthbearer.sub.claim.name = sub
9978 sasl.oauthbearer.token.endpoint.url = null
9979 security.protocol = PLAINTEXT
9980 security.providers = null
9981 send.buffer.bytes = 131072
9982 socket.connection.setup.timeout.max.ms = 30000
9983 socket.connection.setup.timeout.ms = 10000
9984 ssl.cipher.suites = null
9985 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
9986 ssl.endpoint.identification.algorithm = https
9987 ssl.engine.factory.class = null
9988 ssl.key.password = null
9989 ssl.keymanager.algorithm = SunX509
9990 ssl.keystore.certificate.chain = null
9991 ssl.keystore.key = null
9992 ssl.keystore.location = null
9993 ssl.keystore.password = null
9994 ssl.keystore.type = JKS
9995 ssl.protocol = TLSv1.3
9996 ssl.provider = null
9997 ssl.secure.random.implementation = null
9998 ssl.trustmanager.algorithm = PKIX
9999 ssl.truststore.certificates = null
10000 ssl.truststore.location = null
10001 ssl.truststore.password = null
10002 ssl.truststore.type = JKS
10003 transaction.timeout.ms = 60000
10004 transaction.two.phase.commit.enable = false
10005 transactional.id = null
10006 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
10007
1000814:31:09.564 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
1000914:31:09.565 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-26] Instantiated an idempotent producer.
1001014:31:09.567 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
1001114:31:09.567 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
1001214:31:09.567 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855069567
1001314:31:09.570 [kafka-producer-network-thread | producer-26] INFO o.a.k.c.Metadata - [Producer clientId=producer-26] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
1001414:31:09.570 [kafka-producer-network-thread | producer-26] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-26] ProducerId set to 25 with epoch 0
1001514:31:09.578 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-26] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
1001614:31:09.579 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
1001714:31:09.580 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
1001814:31:09.580 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
1001914:31:09.580 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
1002014:31:09.580 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-26 unregistered
1002114:31:09.580 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
10022 acks = -1
10023 batch.size = 16384
10024 bootstrap.servers = [localhost:6001]
10025 buffer.memory = 33554432
10026 client.dns.lookup = use_all_dns_ips
10027 client.id = producer-27
10028 compression.gzip.level = -1
10029 compression.lz4.level = 9
10030 compression.type = none
10031 compression.zstd.level = 3
10032 connections.max.idle.ms = 540000
10033 delivery.timeout.ms = 120000
10034 enable.idempotence = true
10035 enable.metrics.push = true
10036 interceptor.classes = []
10037 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
10038 linger.ms = 5
10039 max.block.ms = 10000
10040 max.in.flight.requests.per.connection = 5
10041 max.request.size = 1048576
10042 metadata.max.age.ms = 300000
10043 metadata.max.idle.ms = 300000
10044 metadata.recovery.rebootstrap.trigger.ms = 300000
10045 metadata.recovery.strategy = rebootstrap
10046 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
10047 metrics.num.samples = 2
10048 metrics.recording.level = INFO
10049 metrics.sample.window.ms = 30000
10050 partitioner.adaptive.partitioning.enable = true
10051 partitioner.availability.timeout.ms = 0
10052 partitioner.class = null
10053 partitioner.ignore.keys = false
10054 receive.buffer.bytes = 32768
10055 reconnect.backoff.max.ms = 1000
10056 reconnect.backoff.ms = 50
10057 request.timeout.ms = 30000
10058 retries = 2147483647
10059 retry.backoff.max.ms = 1000
10060 retry.backoff.ms = 1000
10061 sasl.client.callback.handler.class = null
10062 sasl.jaas.config = null
10063 sasl.kerberos.kinit.cmd = /usr/bin/kinit
10064 sasl.kerberos.min.time.before.relogin = 60000
10065 sasl.kerberos.service.name = null
10066 sasl.kerberos.ticket.renew.jitter = 0.05
10067 sasl.kerberos.ticket.renew.window.factor = 0.8
10068 sasl.login.callback.handler.class = null
10069 sasl.login.class = null
10070 sasl.login.connect.timeout.ms = null
10071 sasl.login.read.timeout.ms = null
10072 sasl.login.refresh.buffer.seconds = 300
10073 sasl.login.refresh.min.period.seconds = 60
10074 sasl.login.refresh.window.factor = 0.8
10075 sasl.login.refresh.window.jitter = 0.05
10076 sasl.login.retry.backoff.max.ms = 10000
10077 sasl.login.retry.backoff.ms = 100
10078 sasl.mechanism = GSSAPI
10079 sasl.oauthbearer.assertion.algorithm = RS256
10080 sasl.oauthbearer.assertion.claim.aud = null
10081 sasl.oauthbearer.assertion.claim.exp.seconds = 300
10082 sasl.oauthbearer.assertion.claim.iss = null
10083 sasl.oauthbearer.assertion.claim.jti.include = false
10084 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
10085 sasl.oauthbearer.assertion.claim.sub = null
10086 sasl.oauthbearer.assertion.file = null
10087 sasl.oauthbearer.assertion.private.key.file = null
10088 sasl.oauthbearer.assertion.private.key.passphrase = null
10089 sasl.oauthbearer.assertion.template.file = null
10090 sasl.oauthbearer.client.credentials.client.id = null
10091 sasl.oauthbearer.client.credentials.client.secret = null
10092 sasl.oauthbearer.clock.skew.seconds = 30
10093 sasl.oauthbearer.expected.audience = null
10094 sasl.oauthbearer.expected.issuer = null
10095 sasl.oauthbearer.header.urlencode = false
10096 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
10097 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
10098 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
10099 sasl.oauthbearer.jwks.endpoint.url = null
10100 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
10101 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
10102 sasl.oauthbearer.scope = null
10103 sasl.oauthbearer.scope.claim.name = scope
10104 sasl.oauthbearer.sub.claim.name = sub
10105 sasl.oauthbearer.token.endpoint.url = null
10106 security.protocol = PLAINTEXT
10107 security.providers = null
10108 send.buffer.bytes = 131072
10109 socket.connection.setup.timeout.max.ms = 30000
10110 socket.connection.setup.timeout.ms = 10000
10111 ssl.cipher.suites = null
10112 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
10113 ssl.endpoint.identification.algorithm = https
10114 ssl.engine.factory.class = null
10115 ssl.key.password = null
10116 ssl.keymanager.algorithm = SunX509
10117 ssl.keystore.certificate.chain = null
10118 ssl.keystore.key = null
10119 ssl.keystore.location = null
10120 ssl.keystore.password = null
10121 ssl.keystore.type = JKS
10122 ssl.protocol = TLSv1.3
10123 ssl.provider = null
10124 ssl.secure.random.implementation = null
10125 ssl.trustmanager.algorithm = PKIX
10126 ssl.truststore.certificates = null
10127 ssl.truststore.location = null
10128 ssl.truststore.password = null
10129 ssl.truststore.type = JKS
10130 transaction.timeout.ms = 60000
10131 transaction.two.phase.commit.enable = false
10132 transactional.id = null
10133 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
10134
1013514:31:09.580 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
1013614:31:09.581 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-27] Instantiated an idempotent producer.
1013714:31:09.582 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
1013814:31:09.582 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
1013914:31:09.582 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855069582
1014014:31:09.584 [kafka-producer-network-thread | producer-27] INFO o.a.k.c.Metadata - [Producer clientId=producer-27] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
1014114:31:09.584 [kafka-producer-network-thread | producer-27] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-27] ProducerId set to 26 with epoch 0
1014214:31:09.592 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-27] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
1014314:31:09.593 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
1014414:31:09.593 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
1014514:31:09.593 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
1014614:31:09.593 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
1014714:31:09.594 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-27 unregistered
1014814:31:09.594 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
10149 acks = -1
10150 batch.size = 16384
10151 bootstrap.servers = [localhost:6001]
10152 buffer.memory = 33554432
10153 client.dns.lookup = use_all_dns_ips
10154 client.id = producer-28
10155 compression.gzip.level = -1
10156 compression.lz4.level = 9
10157 compression.type = none
10158 compression.zstd.level = 3
10159 connections.max.idle.ms = 540000
10160 delivery.timeout.ms = 120000
10161 enable.idempotence = true
10162 enable.metrics.push = true
10163 interceptor.classes = []
10164 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
10165 linger.ms = 5
10166 max.block.ms = 10000
10167 max.in.flight.requests.per.connection = 5
10168 max.request.size = 1048576
10169 metadata.max.age.ms = 300000
10170 metadata.max.idle.ms = 300000
10171 metadata.recovery.rebootstrap.trigger.ms = 300000
10172 metadata.recovery.strategy = rebootstrap
10173 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
10174 metrics.num.samples = 2
10175 metrics.recording.level = INFO
10176 metrics.sample.window.ms = 30000
10177 partitioner.adaptive.partitioning.enable = true
10178 partitioner.availability.timeout.ms = 0
10179 partitioner.class = null
10180 partitioner.ignore.keys = false
10181 receive.buffer.bytes = 32768
10182 reconnect.backoff.max.ms = 1000
10183 reconnect.backoff.ms = 50
10184 request.timeout.ms = 30000
10185 retries = 2147483647
10186 retry.backoff.max.ms = 1000
10187 retry.backoff.ms = 1000
10188 sasl.client.callback.handler.class = null
10189 sasl.jaas.config = null
10190 sasl.kerberos.kinit.cmd = /usr/bin/kinit
10191 sasl.kerberos.min.time.before.relogin = 60000
10192 sasl.kerberos.service.name = null
10193 sasl.kerberos.ticket.renew.jitter = 0.05
10194 sasl.kerberos.ticket.renew.window.factor = 0.8
10195 sasl.login.callback.handler.class = null
10196 sasl.login.class = null
10197 sasl.login.connect.timeout.ms = null
10198 sasl.login.read.timeout.ms = null
10199 sasl.login.refresh.buffer.seconds = 300
10200 sasl.login.refresh.min.period.seconds = 60
10201 sasl.login.refresh.window.factor = 0.8
10202 sasl.login.refresh.window.jitter = 0.05
10203 sasl.login.retry.backoff.max.ms = 10000
10204 sasl.login.retry.backoff.ms = 100
10205 sasl.mechanism = GSSAPI
10206 sasl.oauthbearer.assertion.algorithm = RS256
10207 sasl.oauthbearer.assertion.claim.aud = null
10208 sasl.oauthbearer.assertion.claim.exp.seconds = 300
10209 sasl.oauthbearer.assertion.claim.iss = null
10210 sasl.oauthbearer.assertion.claim.jti.include = false
10211 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
10212 sasl.oauthbearer.assertion.claim.sub = null
10213 sasl.oauthbearer.assertion.file = null
10214 sasl.oauthbearer.assertion.private.key.file = null
10215 sasl.oauthbearer.assertion.private.key.passphrase = null
10216 sasl.oauthbearer.assertion.template.file = null
10217 sasl.oauthbearer.client.credentials.client.id = null
10218 sasl.oauthbearer.client.credentials.client.secret = null
10219 sasl.oauthbearer.clock.skew.seconds = 30
10220 sasl.oauthbearer.expected.audience = null
10221 sasl.oauthbearer.expected.issuer = null
10222 sasl.oauthbearer.header.urlencode = false
10223 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
10224 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
10225 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
10226 sasl.oauthbearer.jwks.endpoint.url = null
10227 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
10228 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
10229 sasl.oauthbearer.scope = null
10230 sasl.oauthbearer.scope.claim.name = scope
10231 sasl.oauthbearer.sub.claim.name = sub
10232 sasl.oauthbearer.token.endpoint.url = null
10233 security.protocol = PLAINTEXT
10234 security.providers = null
10235 send.buffer.bytes = 131072
10236 socket.connection.setup.timeout.max.ms = 30000
10237 socket.connection.setup.timeout.ms = 10000
10238 ssl.cipher.suites = null
10239 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
10240 ssl.endpoint.identification.algorithm = https
10241 ssl.engine.factory.class = null
10242 ssl.key.password = null
10243 ssl.keymanager.algorithm = SunX509
10244 ssl.keystore.certificate.chain = null
10245 ssl.keystore.key = null
10246 ssl.keystore.location = null
10247 ssl.keystore.password = null
10248 ssl.keystore.type = JKS
10249 ssl.protocol = TLSv1.3
10250 ssl.provider = null
10251 ssl.secure.random.implementation = null
10252 ssl.trustmanager.algorithm = PKIX
10253 ssl.truststore.certificates = null
10254 ssl.truststore.location = null
10255 ssl.truststore.password = null
10256 ssl.truststore.type = JKS
10257 transaction.timeout.ms = 60000
10258 transaction.two.phase.commit.enable = false
10259 transactional.id = null
10260 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
10261
1026214:31:09.594 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
1026314:31:09.594 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-28] Instantiated an idempotent producer.
1026414:31:09.596 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
1026514:31:09.596 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
1026614:31:09.596 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855069596
1026714:31:09.597 [kafka-producer-network-thread | producer-28] INFO o.a.k.c.Metadata - [Producer clientId=producer-28] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
1026814:31:09.597 [kafka-producer-network-thread | producer-28] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-28] ProducerId set to 27 with epoch 0
1026914:31:09.606 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-28] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
1027014:31:09.607 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
1027114:31:09.607 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
1027214:31:09.607 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
1027314:31:09.607 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
1027414:31:09.607 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-28 unregistered
1027514:31:09.607 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
10276 acks = -1
10277 batch.size = 16384
10278 bootstrap.servers = [localhost:6001]
10279 buffer.memory = 33554432
10280 client.dns.lookup = use_all_dns_ips
10281 client.id = producer-29
10282 compression.gzip.level = -1
10283 compression.lz4.level = 9
10284 compression.type = none
10285 compression.zstd.level = 3
10286 connections.max.idle.ms = 540000
10287 delivery.timeout.ms = 120000
10288 enable.idempotence = true
10289 enable.metrics.push = true
10290 interceptor.classes = []
10291 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
10292 linger.ms = 5
10293 max.block.ms = 10000
10294 max.in.flight.requests.per.connection = 5
10295 max.request.size = 1048576
10296 metadata.max.age.ms = 300000
10297 metadata.max.idle.ms = 300000
10298 metadata.recovery.rebootstrap.trigger.ms = 300000
10299 metadata.recovery.strategy = rebootstrap
10300 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
10301 metrics.num.samples = 2
10302 metrics.recording.level = INFO
10303 metrics.sample.window.ms = 30000
10304 partitioner.adaptive.partitioning.enable = true
10305 partitioner.availability.timeout.ms = 0
10306 partitioner.class = null
10307 partitioner.ignore.keys = false
10308 receive.buffer.bytes = 32768
10309 reconnect.backoff.max.ms = 1000
10310 reconnect.backoff.ms = 50
10311 request.timeout.ms = 30000
10312 retries = 2147483647
10313 retry.backoff.max.ms = 1000
10314 retry.backoff.ms = 1000
10315 sasl.client.callback.handler.class = null
10316 sasl.jaas.config = null
10317 sasl.kerberos.kinit.cmd = /usr/bin/kinit
10318 sasl.kerberos.min.time.before.relogin = 60000
10319 sasl.kerberos.service.name = null
10320 sasl.kerberos.ticket.renew.jitter = 0.05
10321 sasl.kerberos.ticket.renew.window.factor = 0.8
10322 sasl.login.callback.handler.class = null
10323 sasl.login.class = null
10324 sasl.login.connect.timeout.ms = null
10325 sasl.login.read.timeout.ms = null
10326 sasl.login.refresh.buffer.seconds = 300
10327 sasl.login.refresh.min.period.seconds = 60
10328 sasl.login.refresh.window.factor = 0.8
10329 sasl.login.refresh.window.jitter = 0.05
10330 sasl.login.retry.backoff.max.ms = 10000
10331 sasl.login.retry.backoff.ms = 100
10332 sasl.mechanism = GSSAPI
10333 sasl.oauthbearer.assertion.algorithm = RS256
10334 sasl.oauthbearer.assertion.claim.aud = null
10335 sasl.oauthbearer.assertion.claim.exp.seconds = 300
10336 sasl.oauthbearer.assertion.claim.iss = null
10337 sasl.oauthbearer.assertion.claim.jti.include = false
10338 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
10339 sasl.oauthbearer.assertion.claim.sub = null
10340 sasl.oauthbearer.assertion.file = null
10341 sasl.oauthbearer.assertion.private.key.file = null
10342 sasl.oauthbearer.assertion.private.key.passphrase = null
10343 sasl.oauthbearer.assertion.template.file = null
10344 sasl.oauthbearer.client.credentials.client.id = null
10345 sasl.oauthbearer.client.credentials.client.secret = null
10346 sasl.oauthbearer.clock.skew.seconds = 30
10347 sasl.oauthbearer.expected.audience = null
10348 sasl.oauthbearer.expected.issuer = null
10349 sasl.oauthbearer.header.urlencode = false
10350 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
10351 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
10352 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
10353 sasl.oauthbearer.jwks.endpoint.url = null
10354 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
10355 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
10356 sasl.oauthbearer.scope = null
10357 sasl.oauthbearer.scope.claim.name = scope
10358 sasl.oauthbearer.sub.claim.name = sub
10359 sasl.oauthbearer.token.endpoint.url = null
10360 security.protocol = PLAINTEXT
10361 security.providers = null
10362 send.buffer.bytes = 131072
10363 socket.connection.setup.timeout.max.ms = 30000
10364 socket.connection.setup.timeout.ms = 10000
10365 ssl.cipher.suites = null
10366 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
10367 ssl.endpoint.identification.algorithm = https
10368 ssl.engine.factory.class = null
10369 ssl.key.password = null
10370 ssl.keymanager.algorithm = SunX509
10371 ssl.keystore.certificate.chain = null
10372 ssl.keystore.key = null
10373 ssl.keystore.location = null
10374 ssl.keystore.password = null
10375 ssl.keystore.type = JKS
10376 ssl.protocol = TLSv1.3
10377 ssl.provider = null
10378 ssl.secure.random.implementation = null
10379 ssl.trustmanager.algorithm = PKIX
10380 ssl.truststore.certificates = null
10381 ssl.truststore.location = null
10382 ssl.truststore.password = null
10383 ssl.truststore.type = JKS
10384 transaction.timeout.ms = 60000
10385 transaction.two.phase.commit.enable = false
10386 transactional.id = null
10387 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
10388
14:31:09.607 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:31:09.608 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-29] Instantiated an idempotent producer.
14:31:09.609 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:31:09.609 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:31:09.609 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855069609
14:31:09.612 [kafka-producer-network-thread | producer-29] INFO o.a.k.c.Metadata - [Producer clientId=producer-29] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:31:09.612 [kafka-producer-network-thread | producer-29] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-29] ProducerId set to 28 with epoch 0
14:31:09.620 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-29] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
14:31:09.622 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:31:09.622 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:31:09.622 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:31:09.622 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:31:09.622 [pool-67-thread-7-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-29 unregistered
14:31:09.623 [virtual-760] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_1-18
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

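----
Of the ConsumerConfig dump above, the settings that actually drive the behaviour seen next are group.id = g8_1, auto.offset.reset = earliest, enable.auto.commit = false, and String deserialization. A minimal equivalent with the plain Kafka client API (a sketch under those assumptions, not the build's own code):

    import java.util.Properties
    import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
    import org.apache.kafka.common.serialization.StringDeserializer

    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "g8_1")
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // start from offset 0 when no commit exists
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")   // offsets advance only on explicit commit
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(java.util.List.of("t8_1"))
----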
14:31:09.623 [virtual-760] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:31:09.625 [virtual-760] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:31:09.625 [virtual-760] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:31:09.625 [virtual-760] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855069625
14:31:09.626 [virtual-761] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Subscribed to topic(s): t8_1
14:31:09.629 [virtual-761] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:31:09.629 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:31:09.630 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] (Re-)joining group
14:31:09.631 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_1 in Empty state. Created a new member id consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83 and requesting the member to rejoin with this id.
14:31:09.632 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Request joining group due to: need to re-join with the given member-id: consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83
14:31:09.632 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] (Re-)joining group
14:31:09.632 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83 joins group g8_1 in Empty state. Adding to the group now.
14:31:09.632 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83 with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83).
14:31:12.633 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_1 generation 1 with 1 members.
14:31:12.633 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83', protocol='range'}
14:31:12.634 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Finished assignment for group at generation 1: {consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83=Assignment(partitions=[t8_1-0])}
14:31:12.634 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83 for group g8_1 for generation 1. The group has 1 members, 0 of which are static.
14:31:12.639 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83', protocol='range'}
14:31:12.639 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Notifying assignor about the new Assignment(partitions=[t8_1-0])
14:31:12.639 [virtual-761] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Adding newly assigned partitions: [t8_1-0]
14:31:12.640 [virtual-761] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Found no committed offset for partition t8_1-0
14:31:12.642 [virtual-761] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Resetting offset for partition t8_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
14:31:12.644 [virtual-763] ERROR o.k.KafkaFlow$ - Exception when polling for records
ox.flow.FlowOps$$anon$1: abort take
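----
The "abort take" entry above is ox's own control signal, not a real failure: take(n) completes a flow early by aborting the upstream, and the Kafka polling fork logs whatever terminates it. A minimal sketch of the mechanism, assuming ox's documented Flow API (Flow.fromValues, take, runToList):

    import ox.flow.Flow

    // take(3) completes the flow after three elements; the upstream is stopped
    // via an internal "abort take" signal, which a poller that logs all
    // terminations (like KafkaFlow above) reports at ERROR level.
    val firstThree = Flow.fromValues(1, 2, 3, 4, 5).take(3).runToList()
    // firstThree == List(1, 2, 3)
----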
14:31:12.650 [virtual-768] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Revoke previously assigned partitions [t8_1-0]
14:31:12.651 [virtual-768] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Member consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:31:12.651 [virtual-768] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Resetting generation and member id due to: consumer pro-actively leaving the group
14:31:12.651 [virtual-768] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Request joining group due to: consumer pro-actively leaving the group
14:31:12.651 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_1] Member consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:31:12.651 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g8_1-18-7638777a-7ac4-4d34-8f16-8cf7daba0d83) members.).
14:31:12.651 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_1 with generation 2 is now empty.
14:31:13.145 [virtual-768] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:31:13.145 [virtual-768] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:31:13.145 [virtual-768] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:31:13.146 [virtual-768] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:31:13.147 [virtual-768] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_1-18 unregistered
14:31:13.148 [virtual-769] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_1-19
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

14:31:13.148 [virtual-769] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:31:13.149 [virtual-769] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:31:13.150 [virtual-769] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:31:13.150 [virtual-769] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855073149
14:31:13.150 [virtual-772] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Subscribed to topic(s): t8_1
14:31:13.153 [virtual-772] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:31:13.153 [virtual-772] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:31:13.154 [virtual-772] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] (Re-)joining group
14:31:13.155 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_1 in Empty state. Created a new member id consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70 and requesting the member to rejoin with this id.
14:31:13.156 [virtual-772] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Request joining group due to: need to re-join with the given member-id: consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70
14:31:13.156 [virtual-772] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] (Re-)joining group
14:31:13.157 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70 joins group g8_1 in Empty state. Adding to the group now.
14:31:13.157 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70 with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70).
14:31:16.157 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_1 generation 3 with 1 members.
14:31:16.157 [virtual-772] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70', protocol='range'}
14:31:16.158 [virtual-772] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Finished assignment for group at generation 3: {consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70=Assignment(partitions=[t8_1-0])}
14:31:16.158 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70 for group g8_1 for generation 3. The group has 1 members, 0 of which are static.
14:31:16.164 [virtual-772] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70', protocol='range'}
14:31:16.164 [virtual-772] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Notifying assignor about the new Assignment(partitions=[t8_1-0])
14:31:16.164 [virtual-772] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Adding newly assigned partitions: [t8_1-0]
14:31:16.165 [virtual-772] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t8_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
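----
consumer-g8_1-19 resumes from the committed offset 3 rather than resetting to earliest: with enable.auto.commit = false, that offset can only have been stored by an explicit commit from the previous g8_1 consumer. A hedged sketch of such a commit with the plain client API (the values simply mirror the log; consumer is the one from the sketch further above):

    import org.apache.kafka.clients.consumer.OffsetAndMetadata
    import org.apache.kafka.common.TopicPartition

    // store offset 3 for t8_1-0 in group g8_1, so the next consumer starts there
    consumer.commitSync(java.util.Map.of(new TopicPartition("t8_1", 0), new OffsetAndMetadata(3L)))
----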
14:31:16.168 [virtual-769] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_2-20
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_2
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

14:31:16.169 [virtual-769] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
14:31:16.170 [virtual-769] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
14:31:16.170 [virtual-769] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
14:31:16.171 [virtual-769] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764855076170
14:31:16.171 [virtual-776] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Subscribed to topic(s): t8_1
14:31:16.173 [virtual-776] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Cluster ID: Rv5ipS8WQ9OWJ9EWetzHMA
14:31:16.174 [virtual-776] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
14:31:16.175 [virtual-776] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] (Re-)joining group
14:31:16.177 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_2 in Empty state. Created a new member id consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e and requesting the member to rejoin with this id.
14:31:16.177 [virtual-776] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Request joining group due to: need to re-join with the given member-id: consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e
14:31:16.177 [virtual-776] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] (Re-)joining group
14:31:16.178 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e joins group g8_2 in Empty state. Adding to the group now.
14:31:16.178 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e).
14:31:19.178 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_2 generation 1 with 1 members.
14:31:19.178 [virtual-776] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e', protocol='range'}
14:31:19.179 [virtual-776] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Finished assignment for group at generation 1: {consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e=Assignment(partitions=[t8_1-0])}
14:31:19.180 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e for group g8_2 for generation 1. The group has 1 members, 0 of which are static.
14:31:19.186 [virtual-776] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e', protocol='range'}
14:31:19.186 [virtual-776] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Notifying assignor about the new Assignment(partitions=[t8_1-0])
14:31:19.186 [virtual-776] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Adding newly assigned partitions: [t8_1-0]
14:31:19.187 [virtual-776] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Found no committed offset for partition t8_1-0
14:31:19.188 [virtual-776] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Resetting offset for partition t8_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
14:31:19.190 [virtual-771] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:31:19.190 [virtual-775] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:31:19.190 [virtual-776] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
14:31:19.190 [virtual-772] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
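----
The InterruptedException/InterruptException traces above are scope teardown, not a test failure: when the body of an ox supervised scope finishes, still-running forks are interrupted, and a fork blocked inside KafkaConsumer.poll surfaces that as Kafka's InterruptException. A minimal sketch of the mechanism, assuming ox's supervised/fork/sleep API (an illustration only, not the library's internal code):

    import ox.{fork, sleep, supervised}
    import scala.concurrent.duration.*

    supervised:
      fork:
        // stands in for the blocking poll loop above
        try sleep(1.hour)
        catch case _: InterruptedException => () // expected when the scope shuts down
      sleep(100.millis)
    // once the scope body returns, the fork is interrupted and the scope ends
----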
14:31:19.191 [virtual-779] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Revoke previously assigned partitions [t8_1-0]
14:31:19.191 [virtual-779] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Member consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:31:19.191 [virtual-778] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Revoke previously assigned partitions [t8_1-0]
14:31:19.191 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Member consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
14:31:19.191 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Resetting generation and member id due to: consumer pro-actively leaving the group
14:31:19.191 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Request joining group due to: consumer pro-actively leaving the group
14:31:19.191 [virtual-779] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Resetting generation and member id due to: consumer pro-actively leaving the group
14:31:19.192 [virtual-779] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Request joining group due to: consumer pro-actively leaving the group
14:31:19.192 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_1] Member consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:31:19.192 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g8_1-19-d7dab71d-d9c9-4b06-88d8-1533f9c6ce70) members.).
14:31:19.192 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_1 with generation 4 is now empty.
14:31:19.192 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_2] Member consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
14:31:19.192 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g8_2-20-a318679e-f4a1-4ceb-b48d-75841afdae7e) members.).
14:31:19.192 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_2 with generation 2 is now empty.
14:31:19.677 [virtual-778] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:31:19.677 [virtual-778] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:31:19.677 [virtual-778] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:31:19.677 [virtual-778] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:31:19.679 [virtual-778] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_1-19 unregistered
14:31:19.691 [virtual-779] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:31:19.691 [virtual-779] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:31:19.691 [virtual-779] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
14:31:19.691 [virtual-779] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:31:19.692 [virtual-779] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_2-20 unregistered
14:31:19.695 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from STARTED to SHUTTING_DOWN
14:31:19.695 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] shutting down
14:31:19.696 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Beginning controlled shutdown.
14:31:19.696 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] Unfenced broker 0 has requested and been granted a controlled shutdown.
14:31:19.700 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] enterControlledShutdown[0]: changing 11 partition(s)
14:31:19.700 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=0, inControlledShutdown=1, logDirs=[])
14:31:19.727 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker is in PENDING_CONTROLLED_SHUTDOWN state, still waiting for the active controller.
14:31:19.728 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 11 partition(s) to local followers.
14:31:19.733 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t5_2-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.733 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t6_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.733 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t5_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.733 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower __consumer_offsets-0 starts at leader epoch 1 from offset 1056 with partition epoch 1 and high watermark 1056. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.733 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t4-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.733 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t8_1-0 starts at leader epoch 1 from offset 5 with partition epoch 1 and high watermark 5. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.733 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t7_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.733 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t3_2-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.734 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t3_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.734 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.734 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t2-0 starts at leader epoch 1 from offset 1000 with partition epoch 1 and high watermark 1000. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
14:31:19.735 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions HashSet(t2-0, t6_1-0, t3_1-0, t3_2-0, t8_1-0, t5_2-0, t1-0, __consumer_offsets-0, t5_1-0, t7_1-0, t4-0)
14:31:19.735 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] Removed fetcher for partitions HashSet(t2-0, t6_1-0, t3_1-0, t3_2-0, t8_1-0, t5_2-0, t1-0, __consumer_offsets-0, t5_1-0, t7_1-0, t4-0)
14:31:19.736 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Stopped fetchers as part of controlled shutdown for 11 partitions
14:31:19.737 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Scheduling unloading of metadata for __consumer_offsets-0 with epoch OptionalInt[1]
14:31:19.737 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Started unloading metadata for __consumer_offsets-0 with epoch OptionalInt[1].
14:31:19.737 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g8_1] Unloading group metadata for generation 4.
14:31:19.737 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g3_2] Unloading group metadata for generation 2.
14:31:19.737 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g7_1] Unloading group metadata for generation 4.
14:31:19.737 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g6_1] Unloading group metadata for generation 4.
14:31:19.737 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g5_1] Unloading group metadata for generation 4.
14:31:19.737 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g3_1] Unloading group metadata for generation 4.
14:31:19.738 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g1] Unloading group metadata for generation 2.
14:31:19.738 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g8_2] Unloading group metadata for generation 2.
14:31:19.738 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g7_2] Unloading group metadata for generation 2.
14:31:19.738 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g6_2] Unloading group metadata for generation 2.
14:31:19.738 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=embedded-kafka-spec] Unloading group metadata for generation 4.
14:31:19.738 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g5_2] Unloading group metadata for generation 2.
14:31:19.738 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Finished unloading metadata for __consumer_offsets-0 with epoch OptionalInt[1].
14:31:19.778 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] The request from broker 0 to shut down has been granted since the lowest active offset 9223372036854775807 is now greater than the broker's controlled shutdown offset 219.
14:31:19.780 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=1, inControlledShutdown=0, logDirs=[])
14:31:19.806 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The controller has asked us to exit controlled shutdown.
14:31:19.806 [broker-0-lifecycle-manager-event-handler] INFO o.a.k.q.KafkaEventQueue - [BrokerLifecycleManager id=0] beginShutdown: shutting down event queue.
14:31:19.807 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Transitioning from PENDING_CONTROLLED_SHUTDOWN to SHUTTING_DOWN.
14:31:19.807 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Shutting down
14:31:19.807 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Stopping socket server request processors
14:31:19.808 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Stopped
14:31:19.809 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Shutdown completed
14:31:19.811 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for heartbeat shutdown
14:31:19.811 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Stopped socket server request processors
14:31:19.812 [pool-67-thread-7] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Broker 0] shutting down
14:31:19.813 [pool-67-thread-7] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Broker 0] shut down completely
14:31:19.814 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutting down
14:31:19.814 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Stopped
14:31:19.814 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutdown completed
14:31:19.815 [pool-67-thread-7] INFO k.s.KafkaApis - [KafkaApi-0] Shutdown complete.
14:31:19.816 [pool-67-thread-7] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Shutting down.
14:31:19.817 [pool-67-thread-7] INFO k.c.t.TransactionStateManager - [Transaction State Manager 0]: Shutdown complete
14:31:19.817 [pool-67-thread-7] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Shutting down
14:31:19.817 [TxnMarkerSenderThread-0] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Stopped
14:31:19.817 [pool-67-thread-7] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Shutdown completed
14:31:19.818 [pool-67-thread-7] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Shutdown complete.
14:31:19.819 [pool-67-thread-7] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Shutting down.
14:31:19.819 [pool-67-thread-7] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Closing coordinator runtime.
14:31:19.819 [pool-67-thread-7] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Shutting down
14:31:19.819 [group-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Stopped
14:31:19.820 [pool-67-thread-7] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Shutdown completed
14:31:19.820 [pool-67-thread-7] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [GroupCoordinator id=0] Shutting down event processor.
14:31:19.820 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Shutting down. Draining the remaining events.
14:31:19.820 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Shutdown completed
14:31:19.821 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Shutting down. Draining the remaining events.
14:31:19.821 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Shutdown completed
14:31:19.821 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Shutting down. Draining the remaining events.
14:31:19.821 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Shutdown completed
14:31:19.821 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Shutting down. Draining the remaining events.
14:31:19.821 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Shutdown completed
14:31:19.821 [pool-67-thread-7] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [GroupCoordinator id=0] Event processor closed.
14:31:19.822 [pool-67-thread-7] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Coordinator runtime closed.
14:31:19.823 [pool-67-thread-7] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Shutdown complete.
14:31:19.824 [pool-67-thread-7] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Shutting down.
14:31:19.824 [pool-67-thread-7] INFO o.a.k.c.c.r.CoordinatorRuntime - [ShareCoordinator id=0] Closing coordinator runtime.
14:31:19.824 [pool-67-thread-7] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Shutting down
14:31:19.825 [share-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Stopped
14:31:19.825 [pool-67-thread-7] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Shutdown completed
14:31:19.826 [pool-67-thread-7] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [ShareCoordinator id=0] Shutting down event processor.
14:31:19.826 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Shutting down. Draining the remaining events.
14:31:19.826 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Shutdown completed
14:31:19.826 [pool-67-thread-7] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [ShareCoordinator id=0] Event processor closed.
14:31:19.826 [pool-67-thread-7] INFO o.a.k.c.c.r.CoordinatorRuntime - [ShareCoordinator id=0] Coordinator runtime closed.
14:31:19.828 [pool-67-thread-7] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Shutdown complete.
14:31:19.828 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [AssignmentsManager id=0]KafkaEventQueue#close: shutting down event queue.
14:31:19.828 [broker-0-directory-assignments-manager-event-handler] INFO o.a.k.s.AssignmentsManager - [AssignmentsManager id=0] shutting down.
14:31:19.829 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Shutting down
14:31:19.829 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Stopped
14:31:19.829 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Shutdown completed
1102514:31:19.829 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for directory-assignments shutdown
1102614:31:19.829 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [AssignmentsManager id=0]closed event queue.
1102714:31:19.830 [pool-67-thread-7] INFO k.s.ReplicaManager - [ReplicaManager broker=0] Shutting down
1102814:31:19.831 [pool-67-thread-7] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Shutting down
1102914:31:19.831 [LogDirFailureHandler] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Stopped
1103014:31:19.831 [pool-67-thread-7] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Shutdown completed
1103114:31:19.831 [pool-67-thread-7] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] shutting down
1103214:31:19.832 [pool-67-thread-7] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] shutdown completed
1103314:31:19.833 [pool-67-thread-7] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] shutting down
1103414:31:19.833 [pool-67-thread-7] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] shutdown completed
1103514:31:19.833 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Shutting down
1103614:31:19.833 [ExpirationReaper-0-Fetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Stopped
1103714:31:19.833 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Shutdown completed
1103814:31:19.834 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Shutting down
1103914:31:19.835 [ExpirationReaper-0-RemoteFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Stopped
1104014:31:19.835 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Shutdown completed
1104114:31:19.835 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Shutting down
1104214:31:19.835 [ExpirationReaper-0-RemoteListOffsets] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Stopped
1104314:31:19.836 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Shutdown completed
1104414:31:19.836 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Shutting down
1104514:31:19.836 [ExpirationReaper-0-Produce] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Stopped
1104614:31:19.836 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Shutdown completed
1104714:31:19.837 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Shutting down
1104814:31:19.837 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Shutdown completed
1104914:31:19.837 [ExpirationReaper-0-DeleteRecords] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Stopped
1105014:31:19.838 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Shutting down
1105114:31:19.838 [ExpirationReaper-0-ShareFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Stopped
1105214:31:19.838 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Shutdown completed
1105314:31:19.842 [pool-67-thread-7] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Shutting down
1105414:31:19.842 [AddPartitionsToTxnSenderThread-0] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Stopped
1105514:31:19.842 [pool-67-thread-7] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Shutdown completed
1105614:31:19.843 [pool-67-thread-7] INFO k.s.ReplicaManager - [ReplicaManager broker=0] Shut down completely
1105714:31:19.843 [pool-67-thread-7] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Shutting down
1105814:31:19.843 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Stopped
1105914:31:19.843 [pool-67-thread-7] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Shutdown completed
1106014:31:19.844 [pool-67-thread-7] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for alter-partition shutdown
1106114:31:19.844 [pool-67-thread-7] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Shutting down
1106214:31:19.844 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Stopped
1106314:31:19.844 [pool-67-thread-7] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Shutdown completed
1106414:31:19.845 [pool-67-thread-7] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for forwarding shutdown
1106514:31:19.845 [pool-67-thread-7] INFO k.l.LogManager - Shutting down.
1106614:31:19.846 [pool-67-thread-7] INFO o.a.k.s.i.l.LogCleaner - Shutting down the log cleaner.
1106714:31:19.846 [pool-67-thread-7] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Shutting down
1106814:31:19.846 [kafka-log-cleaner-thread-0] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Stopped
1106914:31:19.847 [pool-67-thread-7] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Shutdown completed
1107014:31:19.852 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t5_2-0] Wrote producer snapshot at offset 3 with 1 producer ids in 1 ms.
1107114:31:19.852 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t3_2-0] Wrote producer snapshot at offset 3 with 1 producer ids in 1 ms.
1107214:31:19.855 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=__consumer_offsets-0] Wrote producer snapshot at offset 1056 with 0 producer ids in 1 ms.
1107314:31:19.856 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t2-0] Wrote producer snapshot at offset 1000 with 1 producer ids in 1 ms.
1107414:31:19.857 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t3_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
1107514:31:19.858 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t5_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
14:31:19.859 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t7_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 0 ms.
14:31:19.860 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t4-0] Wrote producer snapshot at offset 3 with 1 producer ids in 0 ms.
14:31:19.860 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t8_1-0] Wrote producer snapshot at offset 5 with 5 producer ids in 0 ms.
14:31:19.861 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
14:31:19.862 [log-closing-/tmp/kafka-logs7572554700115704093] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t6_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
14:31:19.901 [pool-67-thread-7] INFO k.l.LogManager - Shutdown complete.
14:31:19.902 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Shutting down
14:31:19.903 [broker-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Stopped
14:31:19.903 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Shutdown completed
14:31:19.903 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Shutting down
14:31:19.903 [broker-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Stopped
14:31:19.903 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Shutdown completed
14:31:19.904 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Shutting down
14:31:19.904 [broker-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Stopped
14:31:19.904 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Shutdown completed
14:31:19.904 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Shutting down
14:31:19.904 [broker-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Stopped
14:31:19.904 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Shutdown completed
14:31:19.905 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Shutting down socket server
14:31:19.926 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Shutdown completed
14:31:19.927 [pool-67-thread-7] INFO o.a.k.s.l.m.BrokerTopicStats - Broker and topic stats closed
14:31:19.927 [pool-67-thread-7] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Shutting down
14:31:19.928 [share-group-lock-timeout-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Stopped
14:31:19.928 [pool-67-thread-7] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Shutdown completed
14:31:19.930 [pool-67-thread-7] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Shutting down
14:31:19.930 [PersisterStateManager] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Stopped
14:31:19.930 [pool-67-thread-7] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Shutdown completed
14:31:19.931 [pool-67-thread-7] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Shutting down
14:31:19.931 [persister-state-manager-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Stopped
14:31:19.931 [pool-67-thread-7] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Shutdown completed
14:31:19.933 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [BrokerLifecycleManager id=0] closed event queue.
14:31:19.933 [pool-67-thread-7] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Shutting down
14:31:19.934 [client-metrics-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Stopped
14:31:19.934 [pool-67-thread-7] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Shutdown completed
14:31:19.935 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] shut down completed
14:31:19.935 [pool-67-thread-7] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from SHUTTING_DOWN to SHUTDOWN
14:31:19.936 [pool-67-thread-7] INFO k.s.ControllerServer - [ControllerServer id=0] shutting down
14:31:19.936 [pool-67-thread-7] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Shutting down
14:31:20.052 [raft-expiration-reaper] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Stopped
14:31:20.052 [pool-67-thread-7] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Shutdown completed
14:31:20.052 [pool-67-thread-7] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Shutting down
14:31:20.052 [pool-67-thread-7] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Beginning graceful shutdown
14:31:20.053 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Graceful shutdown completed
14:31:20.053 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Stopped
14:31:20.053 [pool-67-thread-7] INFO o.a.k.r.KafkaRaftClientDriver - [RaftManager id=0] Completed graceful shutdown of RaftClient
14:31:20.053 [pool-67-thread-7] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Shutdown completed
14:31:20.054 [pool-67-thread-7] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Shutting down
14:31:20.054 [kafka-0-raft-outbound-request-thread] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Stopped
14:31:20.054 [pool-67-thread-7] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Shutdown completed
14:31:20.055 [pool-67-thread-7] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=__cluster_metadata-0] Wrote producer snapshot at offset 222 with 0 producer ids in 1 ms.
14:31:20.057 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=tHYPO5N7Ql6gyQ8C-tlBnA] beginShutdown: shutting down event queue.
14:31:20.057 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=tHYPO5N7Ql6gyQ8C-tlBnA] shutting down.
14:31:20.057 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutting down
14:31:20.057 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Stopped
14:31:20.057 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutdown completed
14:31:20.058 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for registration shutdown
14:31:20.058 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=tHYPO5N7Ql6gyQ8C-tlBnA] closed event queue.
14:31:20.058 [pool-67-thread-7] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutdown completed
14:31:20.058 [pool-67-thread-7] WARN o.a.k.c.NetworkClient - [NodeToControllerChannelManager id=0 name=registration] Attempting to close NetworkClient that has already been closed.
14:31:20.058 [pool-67-thread-7] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for registration shutdown
14:31:20.059 [kafka-0-metadata-loader-event-handler] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=tHYPO5N7Ql6gyQ8C-tlBnA] closed event queue.
14:31:20.060 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Stopping socket server request processors
14:31:20.061 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Stopped socket server request processors
14:31:20.061 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [QuorumController id=0] QuorumController#beginShutdown: shutting down event queue.
14:31:20.062 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Shutting down socket server
14:31:20.062 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] writeNoOpRecord: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
14:31:20.062 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] maybeFenceStaleBroker: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
14:31:20.062 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] generatePeriodicPerformanceMessage: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
14:31:20.062 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] electPreferred: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
14:31:20.063 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] electUnclean: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
14:31:20.063 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] expireDelegationTokens: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
14:31:20.068 [pool-67-thread-7] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Shutdown completed
14:31:20.068 [pool-67-thread-7] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Controller 0] shutting down
14:31:20.070 [pool-67-thread-7] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Controller 0] shut down completely
14:31:20.070 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutting down
14:31:20.070 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Stopped
14:31:20.071 [pool-67-thread-7] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutdown completed
14:31:20.071 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Shutting down
14:31:20.071 [controller-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Stopped
14:31:20.071 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Shutdown completed
14:31:20.072 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Shutting down
14:31:20.072 [controller-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Stopped
14:31:20.072 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Shutdown completed
14:31:20.072 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Shutting down
14:31:20.072 [controller-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Stopped
14:31:20.072 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Shutdown completed
14:31:20.073 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Shutting down
14:31:20.073 [controller-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Stopped
14:31:20.073 [pool-67-thread-7] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Shutdown completed
14:31:20.073 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [QuorumController id=0] closed event queue.
14:31:20.074 [pool-67-thread-7] INFO k.s.SharedServer - [SharedServer id=0] Stopping SharedServer
14:31:20.075 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [MetadataLoader id=0] beginShutdown: shutting down event queue.
14:31:20.075 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] beginShutdown: shutting down event queue.
14:31:20.075 [kafka-0-metadata-loader-event-handler] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] closed event queue.
14:31:20.075 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [MetadataLoader id=0] closed event queue.
14:31:20.076 [pool-67-thread-7] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] closed event queue.
14:31:20.076 [pool-67-thread-7] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
14:31:20.077 [pool-67-thread-7] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
14:31:20.077 [pool-67-thread-7] INFO o.a.k.c.m.Metrics - Metrics reporters closed
14:31:20.077 [pool-67-thread-7] INFO o.a.k.c.u.AppInfoParser - App info kafka.server for 0 unregistered
[info] KafkaTest:
[info] source
[info] - should receive messages from a topic
[info] stage
[info] - should publish messages to a topic
[info] stage
[info] - should commit offsets of processed messages
[info] drain
[info] - should publish messages to a topic
[info] drain
[info] - should commit offsets of processed messages
[info] drain
[info] - should commit offsets using runCommit
[info] stage
[info] - should commit offsets using mapCommit
[info] stage
[info] - should commit offsets when consuming a finite stream using take

************************
Build summary:
[{
  "module": "flow-reactive-streams",
  "compile": {"status": "ok", "tookMs": 19595, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 319, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 211, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "mdc-logback",
  "compile": {"status": "ok", "tookMs": 707, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 1227, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 586, "passed": 1, "failed": 0, "ignored": 0, "skipped": 0, "total": 1, "byFramework": [{"framework": "unknown", "stats": {"passed": 1, "failed": 0, "ignored": 0, "skipped": 0, "total": 1}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "core",
  "compile": {"status": "ok", "tookMs": 64, "warnings": 13, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 21524, "warnings": 20, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 149877, "passed": 795, "failed": 0, "ignored": 7, "skipped": 0, "total": 802, "byFramework": [{"framework": "unknown", "stats": {"passed": 795, "failed": 0, "ignored": 7, "skipped": 0, "total": 802}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "cron",
  "compile": {"status": "ok", "tookMs": 373, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 771, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 4171, "passed": 3, "failed": 0, "ignored": 0, "skipped": 0, "total": 3, "byFramework": [{"framework": "unknown", "stats": {"passed": 3, "failed": 0, "ignored": 0, "skipped": 0, "total": 3}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "otel-context",
  "compile": {"status": "ok", "tookMs": 254, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 160, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 174, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "kafka",
  "compile": {"status": "ok", "tookMs": 927, "warnings": 1, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 1595, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 89490, "passed": 8, "failed": 0, "ignored": 0, "skipped": 0, "total": 8, "byFramework": [{"framework": "unknown", "stats": {"passed": 8, "failed": 0, "ignored": 0, "skipped": 0, "total": 8}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
}]
************************
[success] Total time: 304 s (0:05:04.0), completed Dec 4, 2025, 2:31:20 PM
Checking patch project/plugins.sbt...
Checking patch build.sbt...
Applied patch project/plugins.sbt cleanly.
Applied patch build.sbt cleanly.