Build Logs

softwaremill/ox • 3.8.0-RC2:2025-11-28

Errors: 231 · Warnings: 473 · Total Lines: 11242

1##################################
2Cloning https://github.com/softwaremill/ox.git into /build/repo using revision v1.0.2
3##################################
4Note: switching to '9cb2ebc1df12e84198f24d8429f0ed135892788e'.
5
6You are in 'detached HEAD' state. You can look around, make experimental
7changes and commit them, and you can discard any commits you make in this
8state without impacting any branches by switching back to a branch.
9
10If you want to create a new branch to retain commits you create, you may
11do so (now or later) by using -c with the switch command. Example:
12
13 git switch -c <new-branch-name>
14
15Or undo this operation with:
16
17 git switch -
18
19Turn off this advice by setting config variable advice.detachedHead to false
20
21Using target Scala version for migration: 3.7.4
22Migrating project for -source:3.7 using Scala 3.7.4
23----
24Preparing build for 3.7.4
25Would try to apply common scalacOption (best-effort, sbt/mill only):
26Append: -rewrite,REQUIRE:-source:3.7-migration
27Remove: -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
28----
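
The appended and removed options mirror what one would set by hand in an sbt build to run the compiler's migration rewrites; a minimal sketch, assuming a plain Scala 3 module (the project name is illustrative):

    // build.sbt -- minimal sketch of the flags the build injects above:
    // -source:3.7-migration compiles in migration mode, and -rewrite makes
    // the compiler patch the sources in place where it can.
    lazy val core = project
      .settings(
        scalaVersion := "3.7.4",
        scalacOptions ++= Seq("-source:3.7-migration", "-rewrite")
      )

Options such as -Xfatal-warnings and -Werror are filtered out so that migration warnings cannot fail the build.
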
29Starting build for 3.7.4
30Execute tests: false
31sbt project found:
32No prepare script found for project softwaremill/ox
33##################################
34Scala version: 3.7.4
35Targets: com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context
36Project projectConfig: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}
37##################################
38Using extra scalacOptions: -rewrite,REQUIRE:-source:3.7-migration
39Filtering out scalacOptions: -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
40[sbt_options] declare -a sbt_options=()
41[process_args] java_version = '21'
42[copyRt] java9_rt = '/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21/rt.jar'
43# Executing command line:
44java
45-Dfile.encoding=UTF-8
46-Dcommunitybuild.scala=3.7.4
47-Dcommunitybuild.project.dependencies.add=
48-Xmx7G
49-Xms4G
50-Xss8M
51-Dsbt.script=/root/.sdkman/candidates/sbt/current/bin/sbt
52-Dscala.ext.dirs=/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21
53-jar
54/root/.sdkman/candidates/sbt/1.11.5/bin/sbt-launch.jar
55"setCrossScalaVersions 3.7.4"
56"++3.7.4 -v"
57"mapScalacOptions "-rewrite,REQUIRE:-source:3.7-migration,-Wconf:msg=can be rewritten automatically under:s" "-indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e""
58"set every credentials := Nil"
59"excludeLibraryDependency com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}"
60"removeScalacOptionsStartingWith -P:wartremover"
61
62moduleMappings
63"runBuild 3.7.4 """{"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}""" com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context"
64
65[info] [launcher] getting org.scala-sbt sbt 1.11.7 (this may take some time)...
66[info] welcome to sbt 1.11.7 (Eclipse Adoptium Java 21)
67[info] loading settings for project repo-build from akka.sbt, plugins.sbt...
68[info] loading project definition from /build/repo/project
69[info] compiling 2 Scala sources to /build/repo/project/target/scala-2.12/sbt-1.0/classes ...
70[info] Non-compiled module 'compiler-bridge_2.12' for Scala 2.12.20. Compiling...
71[info] Compilation completed in 8.465s.
72[info] done compiling
73[info] loading settings for project rootProject from build.sbt...
74[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
75[info] set current project to ox (in build file:/build/repo/)
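
The MiMa message is the build pinning a binary-compatibility baseline: for non-milestone, non-RC versions it compares against the previous release. With sbt-mima-plugin the equivalent setting is roughly the following sketch (the version logic and coordinates are illustrative, not ox's actual build code):

    // build.sbt -- sketch of a MiMa baseline matching the log message above;
    // milestone/RC versions get no baseline, others compare against 1.0.1.
    mimaPreviousArtifacts := {
      val v = version.value
      if (v.contains("-M") || v.contains("-RC")) Set.empty
      else Set(organization.value %% moduleName.value % "1.0.1")
    }
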
76Execute setCrossScalaVersions: 3.7.4
77OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in cron/crossScalaVersions
78OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in documentation/crossScalaVersions
79OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in core/crossScalaVersions
80OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in core/crossScalaVersions
81OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in flowReactiveStreams/crossScalaVersions
82OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in flowReactiveStreams/crossScalaVersions
83OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in rootProject/crossScalaVersions
84OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in rootProject/crossScalaVersions
85OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in kafka/crossScalaVersions
86OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in kafka/crossScalaVersions
87OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in otelContext/crossScalaVersions
88OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in otelContext/crossScalaVersions
89OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in documentation/crossScalaVersions
90OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in cron/crossScalaVersions
91[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
92OpenCB::Changing crossVersion 3.3.7 -> 3.7.4 in mdcLogback/crossScalaVersions
93OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.7.4) in mdcLogback/crossScalaVersions
94[info] set current project to ox (in build file:/build/repo/)
95[info] Setting Scala version to 3.7.4 on 8 projects.
96[info] Switching Scala version on:
97[info] flowReactiveStreams (3.7.4)
98[info] documentation (3.7.4)
99[info] cron (3.7.4)
100[info] * rootProject (3.7.4)
101[info] mdcLogback (3.7.4)
102[info] kafka (3.7.4)
103[info] core (3.7.4)
104[info] otelContext (3.7.4)
105[info] Excluding projects:
106[info] Reapplying settings...
107[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
108[info] set current project to ox (in build file:/build/repo/)
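
setCrossScalaVersions followed by ++3.7.4 forces every module onto the target compiler; in a plain build this amounts to overriding the cross versions per project, roughly:

    // build.sbt -- the effect of setCrossScalaVersions/++ on each module (sketch):
    crossScalaVersions := Seq("3.7.4")
    scalaVersion       := "3.7.4"
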
109Execute mapScalacOptions: -rewrite,REQUIRE:-source:3.7-migration,-Wconf:msg=can be rewritten automatically under:s -indent,-no-indent,-new-syntax,,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
110[info] Reapplying settings...
111[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
112[info] set current project to ox (in build file:/build/repo/)
113[info] Defining Global / credentials, core / credentials and 6 others.
114[info] The new values will be used by Compile / scalafmtOnly, Global / pgpSelectPassphrase and 63 others.
115[info] Run `last` for details.
116[info] Reapplying settings...
117[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
118[info] set current project to ox (in build file:/build/repo/)
119Execute excludeLibraryDependency: com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}
120[info] Reapplying settings...
121OpenCB::Failed to reapply settings in excludeLibraryDependency: Reference to undefined setting:
122
123 Global / allExcludeDependencies from Global / allExcludeDependencies (CommunityBuildPlugin.scala:331)
124 Did you mean flowReactiveStreams / allExcludeDependencies ?
125 , retry without global scopes
126[info] Reapplying settings...
127[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
128[info] set current project to ox (in build file:/build/repo/)
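
excludeLibraryDependency drops compiler plugins that are unlikely to be published for a pre-release compiler. A plain-sbt approximation uses excludeDependencies; the _3 suffixes below stand in for the {scalaVersion} placeholder in the command above:

    // build.sbt -- approximate equivalent of the command above (sketch);
    // ExclusionRule matches organization/artifact pairs on the classpath.
    excludeDependencies ++= Seq(
      ExclusionRule("com.github.ghik", "zerowaste_3"),
      ExclusionRule("com.olegpy", "better-monadic-for_3"),
      ExclusionRule("org.polyvariant", "better-tostring_3"),
      ExclusionRule("org.wartremover", "wartremover_3")
    )
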
129Execute removeScalacOptionsStartingWith: -P:wartremover
130[info] Reapplying settings...
131[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.1)
132[info] set current project to ox (in build file:/build/repo/)
133[success] Total time: 0 s, completed Nov 28, 2025, 11:59:17 AM
134Build config: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"compile-only","migrationVersions":["3.7"],"sourcePatches":[]}
135Parsed config: Success(ProjectBuildConfig(ProjectsConfig(List(),Map()),CompileOnly,List()))
136Starting build...
137Projects: Set(flowReactiveStreams, cron, mdcLogback, kafka, core, otelContext)
138Starting build for ProjectRef(file:/build/repo/,flowReactiveStreams) (flow-reactive-streams)... [0/6]
139OpenCB::Exclude Scala3 specific scalacOption `-rewrite` in Scala 2.12.20 module Global
140OpenCB::Exclude Scala3 specific scalacOption `REQUIRE:-source:3.7-migration` in Scala 2.12.20 module Global
141OpenCB::Filter out '-deprecation', matches setting pattern '^-?-deprecation'
142OpenCB::Filter out '-feature', matches setting pattern '^-?-feature'
143Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
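
The injected -Wconf option silences the compiler's "can be rewritten automatically" migration messages so they do not drown out real warnings; the general shape of the flag is a filter followed by an action, for example:

    // Shape of the -Wconf flag used above (Scala 3; patterns illustrative):
    //   -Wconf:msg=<regex>:s       silence warnings whose message matches
    //   -Wconf:cat=deprecation:e   escalate a whole category to errors
    scalacOptions += "-Wconf:msg=can be rewritten automatically under:s"
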
144[info] compiling 57 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
145[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:187:27
146[warn] 187 | def fromArray[A: ClassTag](array: Array[A]): Chunk[A] =
147[warn] | ^
148[warn] | unused implicit parameter
149[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:190:28
150[warn] 190 | def fromIArray[A: ClassTag](array: IArray[A]): Chunk[A] =
151[warn] | ^
152[warn] | unused implicit parameter
153[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:35:45
154[warn] 35 | def addSuppressedException[T](error: F[T], e: Throwable): F[T] = error
155[warn] | ^
156[warn] | unused explicit parameter
157[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:40:41
158[warn] 40 | def addSuppressedError[T](error: F[T], e: E): F[T] = error
159[warn] | ^
160[warn] | unused explicit parameter
161[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/channels/SourceOps.scala:5:12
162[warn] 5 |import java.util
163[warn] | ^^^^
164[warn] | unused import
165[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
166[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
167[warn] | ^^^^
168[warn] | unused explicit parameter
169[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:5:10
170[warn] 5 |import ox.Ox
171[warn] | ^^
172[warn] | unused import
173[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:616:8
174[warn] 616 | tap(t => sleep(emitEveryMillis))
175[warn] | ^
176[warn] | unused explicit parameter
177[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:947:53
178[warn] 947 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
179[warn] | ^^^^
180[warn] | unused explicit parameter
181[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowReactiveOps.scala:11:10
182[warn] 11 |import ox.fork
183[warn] | ^^^^
184[warn] | unused import
185[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowTextOps.scala:149:60
186[warn] 149 | def processByteOrderMark(bytes: T, buffer: Chunk[Byte], output: FlowEmit[String]): (Chunk[Byte], State) =
187[warn] | ^^^^^^
188[warn] | unused explicit parameter
189[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
190[warn] 123 | private var successCalls = 0
191[warn] | ^^^^^^^^^^^^
192[warn] | private variable was mutated but not read
193[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
194[warn] 167 | private var successCalls = 0
195[warn] | ^^^^^^^^^^^^
196[warn] | private variable was mutated but not read
197[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:414:15
198[warn] 414 | case Nested(t) =>
199[warn] | ^
200[warn] |the type test for Nested cannot be checked at runtime because it's a local class
201[warn] |
202[warn] | longer explanation available when compiling with `-explain`
203[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
204[warn] 150 | case FromParent(t) =>
205[warn] | ^
206[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
207[warn] |
208[warn] | longer explanation available when compiling with `-explain`
209[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
210[warn] 154 | case ChildDone(v) =>
211[warn] | ^
212[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
213[warn] |
214[warn] | longer explanation available when compiling with `-explain`
215[info] [patched file /build/repo/core/src/main/scala/ox/flow/FlowOps.scala]
216[info] [patched file /build/repo/core/src/main/scala/ox/local.scala]
217[info] [patched file /build/repo/core/src/main/scala/ox/flow/FlowReactiveOps.scala]
218[info] [patched file /build/repo/core/src/main/scala/ox/channels/SourceOps.scala]
219[info] [patched file /build/repo/core/src/main/scala/ox/oxThreadFactory.scala]
220[warn] 16 warnings found
221[info] done compiling
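
The E198 warnings above come from the -Wunused:* flags: parameters and imports that are declared but never read. A minimal reproduction, with scala.annotation.unused as one way to acknowledge an intentionally unused parameter (the names echo ErrorMode.scala but are simplified):

    import scala.annotation.unused

    // Warns under -Wunused:explicits: `e` is accepted but never read.
    def addSuppressedError[T](error: T, e: Throwable): T = error

    // Annotating the parameter marks it as intentionally unused.
    def addSuppressedErrorQuiet[T](error: T, @unused e: Throwable): T = error
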
222[info] compiling 1 Scala source to /build/repo/flow-reactive-streams/target/scala-3.7.4/classes ...
223[info] done compiling
224[info] compiling 5 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
225[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:615:8
226[warn] 615 | tap(t => sleep(emitEveryMillis))
227[warn] | ^
228[warn] | unused explicit parameter
229[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:946:53
230[warn] 946 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
231[warn] | ^^^^
232[warn] | unused explicit parameter
233[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:413:15
234[warn] 413 | case Nested(t) =>
235[warn] | ^
236[warn] |the type test for Nested cannot be checked at runtime because it's a local class
237[warn] |
238[warn] | longer explanation available when compiling with `-explain`
239[warn] three warnings found
240[info] done compiling
241[info] compiling 1 Scala source to /build/repo/core/target/scala-3.7.4/classes ...
242[warn] three warnings found
243[info] done compiling
244[info] compiling 25 Scala sources to /build/repo/core/target/scala-3.7.4/classes ...
245[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
246[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
247[warn] | ^^^^
248[warn] | unused explicit parameter
249[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
250[warn] 123 | private var successCalls = 0
251[warn] | ^^^^^^^^^^^^
252[warn] | private variable was mutated but not read
253[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
254[warn] 167 | private var successCalls = 0
255[warn] | ^^^^^^^^^^^^
256[warn] | private variable was mutated but not read
257[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
258[warn] 150 | case FromParent(t) =>
259[warn] | ^
260[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
261[warn] |
262[warn] | longer explanation available when compiling with `-explain`
263[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
264[warn] 154 | case ChildDone(v) =>
265[warn] | ^
266[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
267[warn] |
268[warn] | longer explanation available when compiling with `-explain`
269[warn] 8 warnings found
270[info] done compiling
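
The E092 warnings concern pattern matches on classes defined locally, inside a method: the compiler cannot fully verify such a type test at runtime. A minimal reproduction, modeled loosely on the Nested match in FlowOps.scala:

    // Matching a value of a wider type against a local class triggers E092,
    // because the runtime type test for a local class cannot be fully checked.
    def run(values: List[Any]): Unit = {
      case class Nested(t: Int) // local class
      values.foreach {
        case Nested(t) => println(t) // E092: unchecked type test
        case _         => ()
      }
    }

Hoisting the class to an enclosing object, so it is no longer local, makes the warning disappear.
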
271Starting build for ProjectRef(file:/build/repo/,mdcLogback) (mdc-logback)... [1/6]
272Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
273[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.7.4/classes ...
274[info] done compiling
275[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.7.4/test-classes ...
276[info] done compiling
277Starting build for ProjectRef(file:/build/repo/,core) (core)... [2/6]
278Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
279[info] compiling 112 Scala sources to /build/repo/core/target/scala-3.7.4/test-classes ...
280[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
281[warn] 43 | def transformation(i: Int) =
282[warn] | ^
283[warn] | unused explicit parameter
284[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ForeachParTest.scala:38:23
285[warn] 38 | def transformation(i: Int) =
286[warn] | ^
287[warn] | unused explicit parameter
288[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
289[warn] 43 | def transformation(i: Int) =
290[warn] | ^
291[warn] | unused explicit parameter
292[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:9:27
293[warn] 9 |import scala.util.boundary.*
294[warn] | ^
295[warn] | unused import
296[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:13:29
297[warn] 13 |import java.util.concurrent.{Semaphore, TimeUnit}
298[warn] | ^^^^^^^^^
299[warn] | unused import
300[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/OxAppTest.scala:13:40
301[warn] 13 |import java.util.concurrent.{Semaphore, TimeUnit}
302[warn] | ^^^^^^^^
303[warn] | unused import
304[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ParTest.scala:80:21
305[warn] 80 | (1 to 5).map(i =>
306[warn] | ^
307[warn] | unused explicit parameter
308[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:126:41
309[warn] 126 | use(new TestResource, _.release()) { r =>
310[warn] | ^
311[warn] | unused explicit parameter
312[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:140:37
313[warn] 140 | useCloseable(new TestResource) { r =>
314[warn] | ^
315[warn] | unused explicit parameter
316[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:157:43
317[warn] 157 | use(new TestResource, _.release()) { r =>
318[warn] | ^
319[warn] | unused explicit parameter
320[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala:3:43
321[warn] 3 |import org.scalatest.concurrent.Eventually.*
322[warn] | ^
323[warn] | unused import
324[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:3:43
325[warn] 3 |import org.scalatest.concurrent.Eventually.*
326[warn] | ^
327[warn] | unused import
328[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:387:44
329[warn] 387 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
330[warn] | ^
331[warn] | unused implicit parameter
332[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsCollectTest.scala:5:10
333[warn] 5 |import ox.*
334[warn] | ^
335[warn] | unused import
336[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsConcatPrependTest.scala:5:10
337[warn] 5 |import ox.*
338[warn] | ^
339[warn] | unused import
340[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceByTest.scala:5:10
341[warn] 5 |import ox.*
342[warn] | ^
343[warn] | unused import
344[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceTest.scala:5:10
345[warn] 5 |import ox.*
346[warn] | ^
347[warn] | unused import
348[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsDropTest.scala:5:10
349[warn] 5 |import ox.*
350[warn] | ^
351[warn] | unused import
352[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsEmptyTest.scala:5:10
353[warn] 5 |import ox.*
354[warn] | ^
355[warn] | unused import
356[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFilterTest.scala:5:10
357[warn] 5 |import ox.*
358[warn] | ^
359[warn] | unused import
360[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFlatMapTest.scala:5:10
361[warn] 5 |import ox.*
362[warn] | ^
363[warn] | unused import
364[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFlattenTest.scala:6:10
365[warn] 6 |import ox.*
366[warn] | ^
367[warn] | unused import
368[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFoldTest.scala:5:10
369[warn] 5 |import ox.*
370[warn] | ^
371[warn] | unused import
372[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsForeachTest.scala:5:10
373[warn] 5 |import ox.*
374[warn] | ^
375[warn] | unused import
376[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsFutureTest.scala:5:10
377[warn] 5 |import ox.*
378[warn] | ^
379[warn] | unused import
380[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:16:27
381[warn] 16 | .groupBy(10, _ % 10)(v => f => f)
382[warn] | ^
383[warn] | unused explicit parameter
384[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:22:44
385[warn] 22 | Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
386[warn] | ^
387[warn] | unused explicit parameter
388[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:28:68
389[warn] 28 | for i <- 1 to 100000 do Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
390[warn] | ^
391[warn] | unused explicit parameter
392[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:109:26
393[warn] 109 | .groupBy(1, _ => 0)(v => _.tap(_ => sleep(10.millis)))
394[warn] | ^
395[warn] | unused explicit parameter
396[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:119:31
397[warn] 119 | .groupBy(10, _ % 10)(v => f => f.tap(i => if i == 13 then throw new RuntimeException("boom!")))
398[warn] | ^
399[warn] | unused explicit parameter
400[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:131:30
401[warn] 131 | .groupBy(1, _ => 0)(v => f => f.tap(_ => sleep(100.millis).tap(_ => throw new RuntimeException("boom!"))))
402[warn] | ^
403[warn] | unused explicit parameter
404[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:142:31
405[warn] 142 | .groupBy(10, _ % 10)(v => f => f)
406[warn] | ^
407[warn] | unused explicit parameter
408[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:153:29
409[warn] 153 | .groupBy(10, _ % 10)(v => f => f.take(1))
410[warn] | ^
411[warn] | unused explicit parameter
412[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsLastOptionTest.scala:6:10
413[warn] 6 |import ox.*
414[warn] | ^
415[warn] | unused import
416[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsLastTest.scala:5:10
417[warn] 5 |import ox.*
418[warn] | ^
419[warn] | unused import
420[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsMapTest.scala:5:10
421[warn] 5 |import ox.*
422[warn] | ^
423[warn] | unused import
424[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsMapUsingSinkTest.scala:5:10
425[warn] 5 |import ox.*
426[warn] | ^
427[warn] | unused import
428[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsReduceTest.scala:5:10
429[warn] 5 |import ox.*
430[warn] | ^
431[warn] | unused import
432[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSampleTest.scala:5:10
433[warn] 5 |import ox.*
434[warn] | ^
435[warn] | unused import
436[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsScanTest.scala:5:10
437[warn] 5 |import ox.*
438[warn] | ^
439[warn] | unused import
440[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitOnTest.scala:5:10
441[warn] 5 |import ox.*
442[warn] | ^
443[warn] | unused import
444[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitTest.scala:5:10
445[warn] 5 |import ox.*
446[warn] | ^
447[warn] | unused import
448[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsTakeWhileTest.scala:5:10
449[warn] 5 |import ox.*
450[warn] | ^
451[warn] | unused import
452[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsThrottleTest.scala:5:10
453[warn] 5 |import ox.*
454[warn] | ^
455[warn] | unused import
456[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsTimeoutTest.scala:6:10
457[warn] 6 |import ox.*
458[warn] | ^
459[warn] | unused import
460[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsUsingSink.scala:5:10
461[warn] 5 |import ox.*
462[warn] | ^
463[warn] | unused import
464[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsZipAllTest.scala:5:10
465[warn] 5 |import ox.*
466[warn] | ^
467[warn] | unused import
468[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:24:21
469[warn] 24 | def afterAttempt(attempt: Int, result: Either[Throwable, Int]): Unit =
470[warn] | ^^^^^^^
471[warn] | unused explicit parameter
472[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:50:21
473[warn] 50 | def afterAttempt(attempt: Int, result: Either[Throwable, Unit]): Unit =
474[warn] | ^^^^^^^
475[warn] | unused explicit parameter
476[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsForeachTest.scala]
477[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSampleTest.scala]
478[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsCollectTest.scala]
479[info] [patched file /build/repo/core/src/test/scala/ox/OxAppTest.scala]
480[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsLastTest.scala]
481[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsTakeWhileTest.scala]
482[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitTest.scala]
483[info] [patched file /build/repo/core/src/test/scala/ox/CollectParTest.scala]
484[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceTest.scala]
485[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFutureTest.scala]
486[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFilterTest.scala]
487[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFlattenTest.scala]
488[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFlatMapTest.scala]
489[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDebounceByTest.scala]
490[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsZipAllTest.scala]
491[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsEmptyTest.scala]
492[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsSplitOnTest.scala]
493[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala]
494[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsUsingSink.scala]
495[info] [patched file /build/repo/core/src/test/scala/ox/FilterParTest.scala]
496[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsZipWithIndexTest.scala]
497[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsLastOptionTest.scala]
498[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsFoldTest.scala]
499[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsReduceTest.scala]
500[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsConcatPrependTest.scala]
501[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsMapUsingSinkTest.scala]
502[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsThrottleTest.scala]
503[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsDropTest.scala]
504[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsMapTest.scala]
505[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsTimeoutTest.scala]
506[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowOpsScanTest.scala]
507[info] [patched file /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala]
508[info] [patched file /build/repo/core/src/test/scala/ox/MapParTest.scala]
509[warn] 49 warnings found
510[info] done compiling
511[info] compiling 33 Scala sources to /build/repo/core/target/scala-3.7.4/test-classes ...
512[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
513[warn] 43 | def transformation(i: Int) =
514[warn] | ^
515[warn] | unused explicit parameter
516[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
517[warn] 43 | def transformation(i: Int) =
518[warn] | ^
519[warn] | unused explicit parameter
520[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:386:44
521[warn] 386 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
522[warn] | ^
523[warn] | unused implicit parameter
524[warn] three warnings found
525[info] done compiling
526Starting build for ProjectRef(file:/build/repo/,cron) (cron)... [3/6]
527Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
528[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/classes ...
529[info] done compiling
530[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/test-classes ...
531[warn] -- [E198] Unused Symbol Warning: /build/repo/cron/src/test/scala/ox/scheduling/cron/CronScheduleTest.scala:7:33
532[warn] 7 |import scala.concurrent.duration.*
533[warn] | ^
534[warn] | unused import
535[info] [patched file /build/repo/cron/src/test/scala/ox/scheduling/cron/CronScheduleTest.scala]
536[warn] one warning found
537[info] done compiling
538[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.7.4/test-classes ...
539[info] done compiling
540Starting build for ProjectRef(file:/build/repo/,otelContext) (otel-context)... [4/6]
541Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
542[info] compiling 1 Scala source to /build/repo/otel-context/target/scala-3.7.4/classes ...
543[info] done compiling
544Starting build for ProjectRef(file:/build/repo/,kafka) (kafka)... [5/6]
545Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -rewrite, -Wconf:msg=can be rewritten automatically under:s, -source:3.7-migration
546[info] compiling 9 Scala sources to /build/repo/kafka/target/scala-3.7.4/classes ...
547[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaConsumerWrapper.scala:45:14
548[warn] 45 | def close(wrapper: KafkaConsumerWrapper[K, V]): Unit = if closeWhenComplete then
549[warn] | ^^^^^^^
550[warn] | unused explicit parameter
551[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaFlow.scala:3:41
552[warn] 3 |import org.apache.kafka.clients.consumer.ConsumerRecord
553[warn] | ^^^^^^^^^^^^^^
554[warn] | unused import
555[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/kafkaOffsetCommit.scala]
556[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/KafkaFlow.scala]
557[info] [patched file /build/repo/kafka/src/main/scala/ox/kafka/KafkaStage.scala]
558[warn] two warnings found
559[info] done compiling
560[info] compiling 3 Scala sources to /build/repo/kafka/target/scala-3.7.4/classes ...
561[info] done compiling
562[info] compiling 6 Scala sources to /build/repo/kafka/target/scala-3.7.4/test-classes ...
563[info] [patched file /build/repo/kafka/src/test/scala/ox/kafka/KafkaTest.scala]
564[info] done compiling
565[info] compiling 1 Scala source to /build/repo/kafka/target/scala-3.7.4/test-classes ...
566[info] done compiling
567
568************************
569Build summary:
570[{
571 "module": "flow-reactive-streams",
572 "compile": {"status": "ok", "tookMs": 16929, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
573 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
574 "test-compile": {"status": "ok", "tookMs": 6754, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
575 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
576 "publish": {"status": "skipped", "tookMs": 0},
577 "metadata": {
578 "crossScalaVersions": ["2.12.20"]
579}
580},{
581 "module": "mdc-logback",
582 "compile": {"status": "ok", "tookMs": 494, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
583 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
584 "test-compile": {"status": "ok", "tookMs": 830, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
585 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
586 "publish": {"status": "skipped", "tookMs": 0},
587 "metadata": {
588 "crossScalaVersions": ["2.12.20"]
589}
590},{
591 "module": "core",
592 "compile": {"status": "ok", "tookMs": 54, "warnings": 16, "errors": 0, "sourceVersion": "3.7-migration"},
593 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
594 "test-compile": {"status": "ok", "tookMs": 20568, "warnings": 49, "errors": 0, "sourceVersion": "3.7-migration"},
595 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
596 "publish": {"status": "skipped", "tookMs": 0},
597 "metadata": {
598 "crossScalaVersions": ["2.12.20"]
599}
600},{
601 "module": "cron",
602 "compile": {"status": "ok", "tookMs": 403, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
603 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
604 "test-compile": {"status": "ok", "tookMs": 749, "warnings": 1, "errors": 0, "sourceVersion": "3.7-migration"},
605 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
606 "publish": {"status": "skipped", "tookMs": 0},
607 "metadata": {
608 "crossScalaVersions": ["2.12.20"]
609}
610},{
611 "module": "otel-context",
612 "compile": {"status": "ok", "tookMs": 180, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
613 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
614 "test-compile": {"status": "ok", "tookMs": 148, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
615 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
616 "publish": {"status": "skipped", "tookMs": 0},
617 "metadata": {
618 "crossScalaVersions": ["2.12.20"]
619}
620},{
621 "module": "kafka",
622 "compile": {"status": "ok", "tookMs": 702, "warnings": 2, "errors": 0, "sourceVersion": "3.7-migration"},
623 "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
624 "test-compile": {"status": "ok", "tookMs": 1665, "warnings": 0, "errors": 0, "sourceVersion": "3.7-migration"},
625 "test": {"status": "skipped", "tookMs": 0, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
626 "publish": {"status": "skipped", "tookMs": 0},
627 "metadata": {
628 "crossScalaVersions": ["2.12.20"]
629}
630}]
631************************
632[success] Total time: 113 s (0:01:53.0), completed Nov 28, 2025, 12:01:10 PM
633Checking patch project/plugins.sbt...
634Checking patch build.sbt...
635Applied patch project/plugins.sbt cleanly.
636Applied patch build.sbt cleanly.
637Commit migration rewrites
638Switched to a new branch 'opencb/migrate-source-3.7'
639[opencb/migrate-source-3.7 db3733b] Apply Scala compiler rewrites using -source:3.7-migration using Scala 3.7.4
640 43 files changed, 24 insertions(+), 60 deletions(-)
641----
642Preparing build for 3.8.0-RC2
643Scala binary version found: 3.8
644Implicitly using source version 3.8
645Scala binary version found: 3.8
646Implicitly using source version 3.8
647Would try to apply common scalacOption (best-effort, sbt/mill only):
648Append: ,REQUIRE:-source:3.8
649Remove: ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
650----
651Starting build for 3.8.0-RC2
652Execute tests: true
653sbt project found:
654No prepare script found for project softwaremill/ox
655##################################
656Scala version: 3.8.0-RC2
657Targets: com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context
658Project projectConfig: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}
659##################################
660Using extra scalacOptions: ,REQUIRE:-source:3.8
661Filtering out scalacOptions: ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
662[sbt_options] declare -a sbt_options=()
663[process_args] java_version = '21'
664[copyRt] java9_rt = '/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21/rt.jar'
665# Executing command line:
666java
667-Dfile.encoding=UTF-8
668-Dcommunitybuild.scala=3.8.0-RC2
669-Dcommunitybuild.project.dependencies.add=
670-Xmx7G
671-Xms4G
672-Xss8M
673-Dsbt.script=/root/.sdkman/candidates/sbt/current/bin/sbt
674-Dscala.ext.dirs=/root/.sbt/1.0/java9-rt-ext-eclipse_adoptium_21
675-jar
676/root/.sdkman/candidates/sbt/1.11.5/bin/sbt-launch.jar
677"setCrossScalaVersions 3.8.0-RC2"
678"++3.8.0-RC2 -v"
679"mapScalacOptions ",REQUIRE:-source:3.8,-Wconf:msg=can be rewritten automatically under:s" ",-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e""
680"set every credentials := Nil"
681"excludeLibraryDependency com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}"
682"removeScalacOptionsStartingWith -P:wartremover"
683
684moduleMappings
685"runBuild 3.8.0-RC2 """{"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}""" com.softwaremill.ox%core com.softwaremill.ox%cron com.softwaremill.ox%flow-reactive-streams com.softwaremill.ox%kafka com.softwaremill.ox%mdc-logback com.softwaremill.ox%otel-context"
686
687[info] welcome to sbt 1.11.7 (Eclipse Adoptium Java 21)
688[info] loading settings for project repo-build from akka.sbt, plugins.sbt...
689[info] loading project definition from /build/repo/project
690[info] loading settings for project rootProject from build.sbt...
691[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
692[info] set current project to ox (in build file:/build/repo/)
693Execute setCrossScalaVersions: 3.8.0-RC2
694[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
695OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC2 in mdcLogback/crossScalaVersions
696OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC2) in mdcLogback/crossScalaVersions
697OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC2 in core/crossScalaVersions
698OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC2) in core/crossScalaVersions
699OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC2 in flowReactiveStreams/crossScalaVersions
700OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC2) in flowReactiveStreams/crossScalaVersions
701OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC2 in cron/crossScalaVersions
702OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC2) in cron/crossScalaVersions
703OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC2 in documentation/crossScalaVersions
704OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC2) in documentation/crossScalaVersions
705OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC2 in rootProject/crossScalaVersions
706OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC2) in rootProject/crossScalaVersions
707OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC2 in kafka/crossScalaVersions
708OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC2) in kafka/crossScalaVersions
709OpenCB::Changing crossVersion 3.3.7 -> 3.8.0-RC2 in otelContext/crossScalaVersions
710OpenCB::Limiting incorrect crossVersions List(2.12.20) -> List(3.8.0-RC2) in otelContext/crossScalaVersions
711[info] set current project to ox (in build file:/build/repo/)
712[info] Setting Scala version to 3.8.0-RC2 on 8 projects.
713[info] Switching Scala version on:
714[info] flowReactiveStreams (3.8.0-RC2)
715[info] documentation (3.8.0-RC2)
716[info] cron (3.8.0-RC2)
717[info] * rootProject (3.8.0-RC2)
718[info] mdcLogback (3.8.0-RC2)
719[info] kafka (3.8.0-RC2)
720[info] core (3.8.0-RC2)
721[info] otelContext (3.8.0-RC2)
722[info] Excluding projects:
723[info] Reapplying settings...
724[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
725[info] set current project to ox (in build file:/build/repo/)
726Execute mapScalacOptions: ,REQUIRE:-source:3.8,-Wconf:msg=can be rewritten automatically under:s ,-deprecation,-feature,-Xfatal-warnings,-Werror,MATCH:.*-Wconf.*any:e
727[info] Reapplying settings...
728[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
729[info] set current project to ox (in build file:/build/repo/)
730[info] Defining Global / credentials, core / credentials and 6 others.
731[info] The new values will be used by Compile / scalafmtOnly, Global / pgpSelectPassphrase and 63 others.
732[info] Run `last` for details.
733[info] Reapplying settings...
734[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
735[info] set current project to ox (in build file:/build/repo/)
736Execute excludeLibraryDependency: com.github.ghik:zerowaste_{scalaVersion} com.olegpy:better-monadic-for_3 org.polyvariant:better-tostring_{scalaVersion} org.wartremover:wartremover_{scalaVersion}
737[info] Reapplying settings...
738OpenCB::Failed to reapply settings in excludeLibraryDependency: Reference to undefined setting:
739
740 Global / allExcludeDependencies from Global / allExcludeDependencies (CommunityBuildPlugin.scala:331)
741 Did you mean flowReactiveStreams / allExcludeDependencies ?
742 , retry without global scopes
743[info] Reapplying settings...
744[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
745[info] set current project to ox (in build file:/build/repo/)
746Execute removeScalacOptionsStartingWith: -P:wartremover
747[info] Reapplying settings...
748[info] Not an M or RC version, using previous version for MiMa check: Some(1.0.2)
749[info] set current project to ox (in build file:/build/repo/)
750[success] Total time: 0 s, completed Nov 28, 2025, 12:01:21 PM
751Build config: {"projects":{"exclude":[],"overrides":{}},"java":{"version":"21"},"sbt":{"commands":[],"options":[]},"mill":{"options":[]},"tests":"full","migrationVersions":["3.7"],"sourcePatches":[]}
752Parsed config: Success(ProjectBuildConfig(ProjectsConfig(List(),Map()),Full,List()))
753Starting build...
754Projects: Set(flowReactiveStreams, cron, mdcLogback, kafka, core, otelContext)
755Starting build for ProjectRef(file:/build/repo/,flowReactiveStreams) (flow-reactive-streams)... [0/6]
756OpenCB::Exclude Scala3 specific scalacOption `REQUIRE:-source:3.8` in Scala 2.12.20 module Global
757OpenCB::Filter out '-deprecation', matches setting pattern '^-?-deprecation'
758OpenCB::Filter out '-feature', matches setting pattern '^-?-feature'
759Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
760[info] compiling 57 Scala sources to /build/repo/core/target/scala-3.8.0-RC2/classes ...
761[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:187:27
762[warn] 187 | def fromArray[A: ClassTag](array: Array[A]): Chunk[A] =
763[warn] | ^
764[warn] | unused implicit parameter
765[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/Chunk.scala:190:28
766[warn] 190 | def fromIArray[A: ClassTag](array: IArray[A]): Chunk[A] =
767[warn] | ^
768[warn] | unused implicit parameter
769[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:35:45
770[warn] 35 | def addSuppressedException[T](error: F[T], e: Throwable): F[T] = error
771[warn] | ^
772[warn] | unused explicit parameter
773[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/ErrorMode.scala:40:41
774[warn] 40 | def addSuppressedError[T](error: F[T], e: E): F[T] = error
775[warn] | ^
776[warn] | unused explicit parameter
777[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowCompanionOps.scala:138:70
778[warn] 138 | def timeout[T](timeout: FiniteDuration): Flow[T] = usingEmitInline: emit =>
779[warn] | ^^^^
780[warn] | unused explicit parameter
781[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:615:8
782[warn] 615 | tap(t => sleep(emitEveryMillis))
783[warn] | ^
784[warn] | unused explicit parameter
785[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:946:53
786[warn] 946 | def drain(): Flow[Nothing] = Flow.usingEmitInline: emit =>
787[warn] | ^^^^
788[warn] | unused explicit parameter
789[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/flow/FlowTextOps.scala:149:60
790[warn] 149 | def processByteOrderMark(bytes: T, buffer: Chunk[Byte], output: FlowEmit[String]): (Chunk[Byte], State) =
791[warn] | ^^^^^^
792[warn] | unused explicit parameter
793[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:123:16
794[warn] 123 | private var successCalls = 0
795[warn] | ^^^^^^^^^^^^
796[warn] | private variable was mutated but not read
797[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/main/scala/ox/resilience/CircuitBreakerStateMachine.scala:167:16
798[warn] 167 | private var successCalls = 0
799[warn] | ^^^^^^^^^^^^
800[warn] | private variable was mutated but not read
801[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/FlowOps.scala:413:15
802[warn] 413 | case Nested(t) =>
803[warn] | ^
804[warn] |the type test for Nested cannot be checked at runtime because it's a local class
805[warn] |
806[warn] | longer explanation available when compiling with `-explain`
807[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:150:15
808[warn] 150 | case FromParent(t) =>
809[warn] | ^
810[warn] |the type test for FromParent cannot be checked at runtime because it's a local class
811[warn] |
812[warn] | longer explanation available when compiling with `-explain`
813[warn] -- [E092] Pattern Match Unchecked Warning: /build/repo/core/src/main/scala/ox/flow/internal/groupByImpl.scala:154:15
814[warn] 154 | case ChildDone(v) =>
815[warn] | ^
816[warn] |the type test for ChildDone cannot be checked at runtime because it's a local class
817[warn] |
818[warn] | longer explanation available when compiling with `-explain`
819[warn] 13 warnings found
820[info] done compiling
821[info] compiling 1 Scala source to /build/repo/flow-reactive-streams/target/scala-3.8.0-RC2/classes ...
822[info] done compiling
823Starting build for ProjectRef(file:/build/repo/,mdcLogback) (mdc-logback)... [1/6]
824Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
825[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.8.0-RC2/classes ...
826[info] done compiling
827[info] compiling 1 Scala source to /build/repo/mdc-logback/target/scala-3.8.0-RC2/test-classes ...
828[info] done compiling
82912:02:05.231 [pool-28-thread-1] INFO ox.logback.InheritableMDC$ -- Scoped-value based MDC initialized
830[info] InheritableMDCTest:
831[info] - should make MDC values available in forks
832Starting build for ProjectRef(file:/build/repo/,core) (core)... [2/6]
833Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
834[info] compiling 112 Scala sources to /build/repo/core/target/scala-3.8.0-RC2/test-classes ...
835[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/CollectParTest.scala:43:23
836[warn] 43 | def transformation(i: Int) =
837[warn] | ^
838[warn] | unused explicit parameter
839[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ForeachParTest.scala:38:23
840[warn] 38 | def transformation(i: Int) =
841[warn] | ^
842[warn] | unused explicit parameter
843[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/MapParTest.scala:43:23
844[warn] 43 | def transformation(i: Int) =
845[warn] | ^
846[warn] | unused explicit parameter
847[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ParTest.scala:80:21
848[warn] 80 | (1 to 5).map(i =>
849[warn] | ^
850[warn] | unused explicit parameter
851[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:126:41
852[warn] 126 | use(new TestResource, _.release()) { r =>
853[warn] | ^
854[warn] | unused explicit parameter
855[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:140:37
856[warn] 140 | useCloseable(new TestResource) { r =>
857[warn] | ^
858[warn] | unused explicit parameter
859[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/ResourceTest.scala:157:43
860[warn] 157 | use(new TestResource, _.release()) { r =>
861[warn] | ^
862[warn] | unused explicit parameter
863[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowCompanionIOOpsTest.scala:5:19
864[warn] 5 |import ox.{timeout as _, *}
865[warn] | ^^^^^^^^^^^^
866[warn] | unused import
867[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:5:19
868[warn] 5 |import ox.{timeout as _, *}
869[warn] | ^^^^^^^^^^^^
870[warn] | unused import
871[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowIOOpsTest.scala:386:44
872[warn] 386 | private def fileContent(path: Path)(using Ox): List[String] = Flow.fromFile(path).runToList().map(_.asStringUtf8)
873[warn] | ^
874[warn] | unused implicit parameter
875[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:16:27
876[warn] 16 | .groupBy(10, _ % 10)(v => f => f)
877[warn] | ^
878[warn] | unused explicit parameter
879[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:22:44
880[warn] 22 | Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
881[warn] | ^
882[warn] | unused explicit parameter
883[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:28:68
884[warn] 28 | for i <- 1 to 100000 do Flow.fromValues(42).groupBy(10, _ % 10)(v => f => f).runToList() shouldBe List(42)
885[warn] | ^
886[warn] | unused explicit parameter
887[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:109:26
888[warn] 109 | .groupBy(1, _ => 0)(v => _.tap(_ => sleep(10.millis)))
889[warn] | ^
890[warn] | unused explicit parameter
891[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:119:31
892[warn] 119 | .groupBy(10, _ % 10)(v => f => f.tap(i => if i == 13 then throw new RuntimeException("boom!")))
893[warn] | ^
894[warn] | unused explicit parameter
895[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:131:30
896[warn] 131 | .groupBy(1, _ => 0)(v => f => f.tap(_ => sleep(100.millis).tap(_ => throw new RuntimeException("boom!"))))
897[warn] | ^
898[warn] | unused explicit parameter
899[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:142:31
900[warn] 142 | .groupBy(10, _ % 10)(v => f => f)
901[warn] | ^
902[warn] | unused explicit parameter
903[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/flow/FlowOpsGroupByTest.scala:153:29
904[warn] 153 | .groupBy(10, _ % 10)(v => f => f.take(1))
905[warn] | ^
906[warn] | unused explicit parameter
907[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:24:21
908[warn] 24 | def afterAttempt(attempt: Int, result: Either[Throwable, Int]): Unit =
909[warn] | ^^^^^^^
910[warn] | unused explicit parameter
911[warn] -- [E198] Unused Symbol Warning: /build/repo/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:50:21
912[warn] 50 | def afterAttempt(attempt: Int, result: Either[Throwable, Unit]): Unit =
913[warn] | ^^^^^^^
914[warn] | unused explicit parameter
915[warn] 20 warnings found
916[info] done compiling
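
The E198 warnings above all point at bindings that are named but never used, which -source:3.7 reports by default. A minimal sketch of the usual remedies (plain Scala 3; illustrative, not the project's actual patch):

    import scala.annotation.unused

    // Rename a meaningless binding to `_`:
    //   before: .groupBy(10, _ % 10)(v => f => f)
    //   after:  .groupBy(10, _ % 10)(_ => f => f)

    // Keep the name where it documents intent, but silence the warning:
    def transformation(@unused i: Int): Int = 42

    // Unused imports go away by deleting or narrowing the import, e.g.
    //   import ox.{timeout as _, *}  ->  import ox.*
    // (only if the `timeout as _` exclusion is no longer needed on purpose).
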
917[info] AfterAttemptTest:
918[info] RetryPolicy afterAttempt callback
919[info] - should retry a succeeding function with afterAttempt callback
920[info] - should retry a failing function with afterAttempt callback
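
AfterAttemptTest exercises a retry policy that invokes a callback after every attempt, passing the attempt number and its outcome (the afterAttempt(attempt: Int, result: Either[Throwable, T]): Unit shape visible in the warnings above). A standalone sketch of that behaviour, in generic Scala rather than ox's actual retry API:

    import scala.util.Try

    def retryWithCallback[T](maxAttempts: Int)(
        afterAttempt: (Int, Either[Throwable, T]) => Unit
    )(operation: => T): T =
      var attempt = 1
      while true do
        val result = Try(operation).toEither
        afterAttempt(attempt, result) // runs on success and failure alike
        result match
          case Right(v)                          => return v
          case Left(e) if attempt >= maxAttempts => throw e
          case Left(_)                           => attempt += 1
      throw new IllegalStateException("unreachable")

    // usage, with flakyCall() standing in for any fallible computation:
    //   retryWithCallback(3)((n, r) => println(s"attempt $n: $r"))(flakyCall())
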
921[info] SourceOpsTest:
922[info] - should pipe one source to another
923[info] - should pipe one source to another (with done propagation)
924[info] - should tap over a source
925[info] FlowOpsReduceTest:
926[info] reduce
927[info] - should throw NoSuchElementException for reduce over the empty source
928[info] - should throw exception thrown in `f` when `f` throws
929[info] - should return first element from reduce over the single element source
930[info] - should run reduce over a non-empty source
931[info] ExceptionTest:
932[info] unsupervised
933[info] - should throw the exception thrown by a joined fork
934[info] supervised
935[2025-11-28T11:02:29.197537899Z] [24] CustomException
936[2025-11-28T11:02:29.203627140Z] [24] CustomException
937[info] - should throw the exception thrown in the scope
938[2025-11-28T11:02:29.206076642Z] [24] CustomException(suppressed=ExecutionException)
939[info] - should retain the original exception for context, as suppressed
940[2025-11-28T11:02:29.208610439Z] [24] CustomException
941[info] - should throw the exception thrown by a failing fork
942[2025-11-28T11:02:29.313278731Z] [24] CustomException(suppressed=ExecutionException,InterruptedException,InterruptedException)
943[info] - should interrupt other forks when there's a failure, add suppressed interrupted exceptions
944[2025-11-28T11:02:29.417686639Z] [24] CustomException(suppressed=ExecutionException,CustomException2)
945[info] - should interrupt other forks when there's a failure, add suppressed custom exceptions
946[2025-11-28T11:02:29.420182271Z] [24] CustomException(suppressed=ExecutionException,InterruptedException)
947[info] - should not add the original exception as suppressed
948[2025-11-28T11:02:29.425826591Z] [24] CustomException(suppressed=ExecutionException,CustomException3)
949[info] - should add an exception as suppressed, even if it wraps the original exception
950[info] joinEither
951[info] - should catch the exception with which a fork ends
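
The ExceptionTest output above documents ox's supervision semantics: the first failure wins, sibling forks are interrupted, and their exceptions are attached to the original one as suppressed. A small sketch of the pattern under test, assuming ox's supervised/fork/join API as it appears elsewhere in this log:

    import ox.*

    try
      supervised {
        fork { Thread.sleep(1000); println("never printed") } // interrupted by the failure below
        fork { throw new RuntimeException("CustomException") }.join()
      }
    catch
      case e: Throwable =>
        // sibling interruptions surface here as suppressed exceptions
        val suppressed = e.getSuppressed.map(_.getClass.getSimpleName).mkString(",")
        println(s"${e.getMessage}, suppressed=$suppressed")
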
952[info] ScheduleFallingBackRetryTest:
953[info] retry with combination of schedules
954[info] - should retry 3 times immediately and then 2 times with delay
955[info] - should retry forever
956[info] DelayedRetryTest:
957[info] Delayed retry
958[info] - should retry a function
959[info] - should retry a failing function forever
960[info] - should retry an Either
961[info] adaptive retry with delayed config
962[info] - should retry a failing function forever or until adaptive retry blocks it
963[info] CircuitBreakerTest:
964[info] Circuit Breaker run operations
965[info] - should run operation when metrics are not exceeded
966[info] - should drop operation after exceeding failure threshold
967[info] - should drop operation after exceeding slow call threshold
968[info] Circuit Breaker scheduled state changes
969[info] - should switch to halfOpen after configured time
970[info] - should switch back to open after configured timeout in half open state
971[info] - should correctly transition through states when there are concurrently running operations
972[info] - should correctly calculate metrics when results come in after state change
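
The CircuitBreakerTest suite above walks the closed -> open -> half-open cycle driven by failure metrics and timeouts. A compact, generic sketch of such a state machine (illustrative only; ox's CircuitBreaker has a much richer configuration):

    enum CbState:
      case Closed(failures: Int)
      case Open(openedAt: Long)
      case HalfOpen(successes: Int)

    final class TinyBreaker(failureThreshold: Int, waitOpenMillis: Long, halfOpenProbes: Int):
      private var state: CbState = CbState.Closed(0)

      def allows(now: Long): Boolean = state match
        case CbState.Open(t) => now - t >= waitOpenMillis // time to probe again
        case _               => true

      def onResult(succeeded: Boolean, now: Long): Unit = state = state match
        case CbState.Closed(f) =>
          if succeeded then CbState.Closed(0)
          else if f + 1 >= failureThreshold then CbState.Open(now)
          else CbState.Closed(f + 1)
        case CbState.Open(t) =>
          if now - t >= waitOpenMillis then CbState.HalfOpen(if succeeded then 1 else 0)
          else CbState.Open(t)
        case CbState.HalfOpen(s) =>
          if !succeeded then CbState.Open(now)
          else if s + 1 >= halfOpenProbes then CbState.Closed(0)
          else CbState.HalfOpen(s + 1)
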
973[info] FlowOpsTakeWhileTest:
974[info] takeWhile
975[info] - should not take from the empty flow
976[info] - should take as long as predicate is satisfied
977[info] - should take the failed element if includeFirstFailing = true
978[info] - should work if all elements match the predicate
979[info] - should fail the source with the same exception as the initial source
980[info] - should not take if predicate fails for first or more elements
981[info] FlowOpsInterleaveAllTest:
982[info] interleaveAll
983[info] - should interleave no sources
984[info] - should interleave a single flow
985[info] - should interleave multiple flows
986[info] - should interleave multiple flows using custom segment size
987[info] - should interleave multiple flows using custom segment size and complete eagerly
988[info] AppErrorTest:
989[info] supervisedError
990[info] - should return the app error from the main body
991[info] - should return success from the main body
992[info] - should return the app error returned by a failing fork
993[info] - should return success from the main body if a fork is successful
994[info] - should interrupt other forks if one fails
995[info] ChunkTest:
996[info] Chunk
997[info] - should create empty chunks
998[info] - should create chunks from arrays
999[info] - should create chunks from IArrays
1000[info] - should create chunks from elements
1001[info] - should create empty chunks from empty arrays
1002[info] - should support random access
1003[info] - should throw IndexOutOfBoundsException for invalid indices
1004[info] - should support iteration
1005[info] - should support foreach operations
1006[info] - should concatenate two non-empty chunks efficiently
1007[info] - should handle concatenation with empty chunks
1008[info] - should support chained concatenation
1009[info] - should concatenate chunks of different types
1010[info] - should concatenate non-empty chunk with non-chunk collections
1011[info] - should concatenate empty chunk with non-chunk collections
1012[info] - should handle concatenation with empty collections
1013[info] - should support drop operations
1014[info] - should support take operations
1015[info] - should handle drop/take on concatenated chunks
1016[info] - should support map operations
1017[info] - should support filter operations
1018[info] - should support collect operations
1019[info] - should convert to arrays correctly
1020[info] - should convert concatenated chunks to arrays correctly
1021[info] - should convert byte chunks to strings
1022[info] - should convert concatenated byte chunks to strings
1023[info] - should provide access to backing arrays
1024[info] - should allow efficient processing via backing arrays
1025[info] - should handle operations on empty chunks
1026[info] - should maintain consistency between single and multi-array chunks
1027[info] - should handle large chunks efficiently
1028[info] - should support indexWhere on single chunks
1029[info] - should support indexWhere on concatenated chunks
1030[info] - should handle indexWhere on empty chunks
1031[info] - should handle indexWhere edge cases with concatenated chunks
1032[info] - should support contains and exists operations
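
ChunkTest above stresses cheap concatenation and access across multiple backing arrays. A toy model of the idea (not ox's Chunk): `++` merely records both backings, deferring any copying:

    final case class MiniChunk[A](backings: Vector[Array[A]]):
      def ++(other: MiniChunk[A]): MiniChunk[A] =
        MiniChunk(backings ++ other.backings) // no element copying

      def length: Int = backings.map(_.length).sum

      def apply(i: Int): A = // random access walks the backing arrays
        var remaining = i
        var j = 0
        while j < backings.length do
          val a = backings(j)
          if remaining < a.length then return a(remaining)
          remaining -= a.length
          j += 1
        throw new IndexOutOfBoundsException(i.toString)
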
1033[info] FlowOpsFoldTest:
1034[info] fold
1035[info] - should throw an exception for a failed flow
1036[info] - should throw exception thrown in `f` when `f` throws
1037[info] - should return `zero` value from fold on the empty source
1038[info] - should return fold on non-empty source
1039[info] FlowOpsFilterTest:
1040[info] filter
1041[info] - should not filter anything from the empty flow
1042[info] - should filter out everything if no element meets 'f'
1043[info] - should not filter anything if all the elements meet 'f'
1044[info] - should filter out elements that don't meet 'f'
1045[info] FlowOpsMapUsingSinkTest:
1046[info] mapUsingSink
1047[info] - should map over a source, using emit
1048[info] - should propagate errors
1049[info] FlowOpsCollectTest:
1050[info] collect
1051[info] - should collect over a source
1052[info] FlowOpsGroupedTest:
1053[info] grouped
1054[info] - should emit grouped elements
1055[info] - should emit grouped elements and include remaining values when flow closes
1056[info] - should return failed flow when the original flow is failed
1057[info] groupedWeighted
1058[info] - should emit grouped elements with custom cost function
1059[info] - should return failed flow when cost function throws exception
1060[info] - should return failed source when the original source is failed
1061[info] groupedWithin
1062[info] - should group first batch of elements due to limit and second batch due to timeout
1063[info] - should group first batch of elements due to timeout and second batch due to limit
1064[info] - should wake up on new element and send it immediately after first batch is sent and channel goes to time-out mode
1065[info] - should send the group only once when the channel is closed
1066[info] - should return failed source when the original source is failed
1067[info] groupedWeightedWithin
1068[info] - should group elements on timeout in the first batch and consider max weight in the remaining batches
1069[info] - should return failed source when cost function throws exception
1070[info] - should return failed source when the original source is failed
1071[info] MapParTest:
1072[info] mapPar
1073[info] - should output the same type as input
1074[info] - should run computations in parallel
1075[info] - should run no more computations than the limit
1076[2025-11-28T11:02:50.344738520Z] [386] exception
1077[2025-11-28T11:02:50.347164675Z] [24] catch
1078[2025-11-28T11:02:50.647370238Z] [24] all done
1079[info] - should interrupt other computations if one fails
1080[info] RateLimiterInterfaceTest:
1081[info] RateLimiter interface
1082[info] - should drop or block operation depending on method used for fixed rate algorithm
1083[info] - should drop or block operation depending on method used for sliding window algorithm
1084[info] - should drop or block operation depending on method used for bucket algorithm
1085[info] - should drop or block operation concurrently
1086[info] UtilTest:
1087[info] discard
1088[2025-11-28T11:02:56.673278656Z] [24] in f
1089[info] - should do nothing
1090[info] tapException
1091[2025-11-28T11:02:56.674214609Z] [24] in callback: boom!
1092[2025-11-28T11:02:56.674291513Z] [24] in catch: boom!
1093[2025-11-28T11:02:56.674879969Z] [24] 42
1094[2025-11-28T11:02:56.674951261Z] [24] after
1095[info] - should run the callback when an exception is thrown
1096[info] - should not run the callback when no exception is thrown
1097[2025-11-28T11:02:56.677819902Z] [24] in catch: boom! 1
1098[info] - should suppress any additional exceptions
1099[2025-11-28T11:02:56.679672435Z] [24] Adding
1100[2025-11-28T11:02:56.679758445Z] [24] Got: 3
1101some label: 10
1102[info] pipe
1103[info] - should work
1104[info] tap
1105[info] - should work
1106[info] debug as extension
1107[info] - should work
1108[info] debug as top-level method
1109x.+(1) = 11
1110[info] - should work
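
UtilTest's pipe/tap/tapException output above follows a well-known chaining pattern; the first two also exist in the standard library as scala.util.chaining. A sketch of the semantics (standard library only, with tapException reimplemented generically rather than as ox defines it):

    import scala.util.chaining.*

    val n = 10
      .pipe(_ + 1)                  // pipe: feed the value through a function -> 11
      .tap(v => println(s"saw $v")) // tap: perform a side effect, pass the value on

    // tapException in the same spirit: run a callback only if the body throws
    def tapException[T](body: => T)(onError: Throwable => Unit): T =
      try body
      catch
        case e: Throwable =>
          onError(e)
          throw e
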
1111[info] FlowOpsLastTest:
1112[info] last
1113[info] - should throw NoSuchElementException for the empty source
1114[info] - should throw ChannelClosedException.Error with exception and message that was thrown during retrieval
1115[info] - should return last element for the non-empty source
1116[info] FlowOpsFailedTest:
1117[info] failed
1118[info] - should fail on receive
1119[info] FlowOpsFlattenTest:
1120[info] flatten
1121[info] - should flatten nested flows
1122[info] WeightedHeapTest:
1123[info] WeightedHeap
1124[info] - should allow inserting elements with weights
1125[info] - should allow extracting the minimum element
1126[info] - should return None when extracting from an empty heap
1127[info] - should return the correct size after operations
1128[info] - should handle empty heaps correctly
1129[info] - should update the weight of an existing element and adjust its position
1130[info] - should throw an exception when updating the weight of a non-existent element
1131[info] - should handle multiple insertions and updates correctly
1132[info] - should handle duplicate insertions by updating the existing element's weight
1133[info] - should handle increasing the weight of an existing element
1134[info] - should maintain heap property after multiple weight increases
1135[info] - should work correctly when increasing the weight of the current minimum element
1136[info] - should handle increasing weights in a large heap
1137[info] - should maintain the heap property after multiple operations
1138[info] - should work with large numbers of elements
1139[info] - should maintain heap property with random insertions and extractions
1140[info] - should maintain heap property with random weight updates
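
The WeightedHeap tests above require a min-heap whose element weights can be updated in place, with duplicate inserts acting as updates. A toy version of that structure using an ordered set plus an index map (illustrative, not ox's implementation):

    import scala.collection.mutable

    final class MiniWeightedHeap[A: Ordering]:
      private val byWeight = mutable.TreeSet.empty[(Long, A)] // ordered by (weight, element)
      private val weights  = mutable.Map.empty[A, Long]

      def insert(a: A, w: Long): Unit =
        weights.get(a).foreach(old => byWeight.remove((old, a))) // duplicate insert = update
        byWeight.add((w, a)); weights(a) = w

      def updateWeight(a: A, w: Long): Unit =
        val old = weights.getOrElse(a, throw new NoSuchElementException(a.toString))
        byWeight.remove((old, a)); byWeight.add((w, a)); weights(a) = w

      def extractMin(): Option[(A, Long)] =
        byWeight.headOption.map { case (w, a) =>
          byWeight.remove((w, a)); weights.remove(a); (a, w)
        }

      def size: Int = weights.size
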
1141[info] FlowOpsMapConcatTest:
1142[info] mapConcat
1143[info] - should unfold iterables
1144[info] - should transform elements
1145[info] - should handle empty lists
1146[info] - should propagate errors in the mapping function
1147[info] FlowOpsPipeToTest:
1148[info] - should pipe one source to another
1149[info] - should pipe one source to another (with done propagation)
1150[info] FlowOpsRecoverTest:
1151[info] Flow.recover
1152[info] - should pass through elements when upstream flow succeeds
1153[info] - should emit recovery value when upstream flow fails with handled exception
1154[info] - should not emit recovery value when downstream flow fails with handled exception
1155[info] - should propagate unhandled exceptions
1156[info] - should handle multiple exception types
1157[info] - should work with different recovery value type
1158[info] - should handle exception thrown during flow processing
1159[info] - should work with empty flow
1160[info] - should propagate exception when partial function throws
1161[info] FlowOpsMapStatefulTest:
1162[info] mapStateful
1163[info] - should zip with index
1164[info] - should calculate a running total
1165[info] - should be able to emit different values than incoming ones
1166[info] - should propagate errors in the mapping function
1167[info] - should propagate errors in the completion callback
1168[info] FlowOpsMapTest:
1169[info] map
1170[info] - should map over a source
1171[info] - should map over a source using for-syntax
1172[info] ForkTest:
1173[info] fork
1174[2025-11-28T11:02:56.861402174Z] [24] main mid
1175[2025-11-28T11:02:57.362000509Z] [435] f1 complete
1176[2025-11-28T11:02:57.862039480Z] [436] f2 complete
1177[2025-11-28T11:02:57.862260568Z] [24] result = 11
1178[info] - should run two forks concurrently
1179[2025-11-28T11:02:57.864571761Z] [438] f2 complete
1180[2025-11-28T11:02:57.864730345Z] [437] f1 complete
1181[2025-11-28T11:02:57.864969170Z] [24] result = 11
1182[info] - should allow nested forks
1183[2025-11-28T11:02:57.866374065Z] [24] main mid
1184[2025-11-28T11:02:58.366979014Z] [439] f1 complete
1185[2025-11-28T11:02:58.367212285Z] [24] result = 5
1186[2025-11-28T11:02:58.367461409Z] [440] f2 interrupted
1187[info] - should interrupt child forks when parents complete
1188[2025-11-28T11:02:58.369655513Z] [444] in fork
1189[info] - should allow starting forks within a forkCancellable body, using the outer scope
1190[2025-11-28T11:02:58.471797980Z] [447] in fork
1191[info] - should allow starting forks in outer scope, from an inner scope
1192[2025-11-28T11:02:58.473411366Z] [449] IllegalStateException
1193[info] - should not allow starting forks from a thread created not by the scope
1194[info] FlowOpsIntersperseTest:
1195[info] Flow.intersperse
1196[info] - should intersperse with inject only over an empty source
1197[info] - should intersperse with inject only over a source with one element
1198[info] - should intersperse with inject only over a source with multiple elements
1199[info] - should intersperse with start, inject and end over an empty source
1200[info] - should intersperse with start, inject and end over a source with one element
1201[info] - should intersperse with start, inject and end over a source with multiple elements
1202[info] FlowOpsScanTest:
1203[info] scan
1204[info] - should scan the empty flow
1205[info] - should scan a flow of summed Int
1206[info] - should scan a flow of multiplied Int
1207[info] - should scan a flow of concatenated String
1208[info] FlowOpsUsingSinkTest:
1209[info] usingSink
1210[info] - should send the passed elements
1211[info] FlowOpsTakeTest:
1212[info] take
1213[info] - should take from a simple flow
1214[info] - should take from an async flow
1215[info] - should take all if the flow ends sooner than the desired number of elements
1216[info] EitherTest:
1217[info] either
1218[info] - should work correctly when invoked on eithers
1219[info] - should work correctly when invoked on options
1220[info] - should work correctly when invoked on fork
1221[info] - should report a proper compilation error when used outside of either:
1222[info] - should report a proper compilation error when wrong error type is used for ok() (explicit type params)
1223[info] - should report a proper compilation error when wrong successful type is used (explicit type params)
1224[info] - should report a proper compilation error when wrong type annotation is used for ok() (error)
1225[info] - should report a proper compilation error when wrong type annotation is used (success)
1226[info] - should report a proper compilation error when wrong error type is used for fail() (explicit type params)
1227[info] - should report a proper compilation error when wrong type annotation is used for fail() (error)
1228[info] - should catch non fatal exceptions
1229[info] - should not catch fatal exceptions
1230[info] - should provide an either scope when catching non fatal exceptions
1231[info] - should report a proper compilation error when wrong error type is used for ok() in catchingNonFatal block
1232[info] - should work when combined with mapPar
1233[info] - should not allow nesting of eithers
1234[info] orThrow
1235[info] - should unwrap the value for a Right-value
1236[info] - should throw exceptions for a Left-value
1237[info] catching
1238[info] - should catch given exceptions only
1239[info] - should catch parent exceptions
1240[info] - should not catch non-given exceptions
1241[info] - should not catch fatal exceptions
1242[info] - should return successful results as Right-values
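
EitherTest covers ox's either: blocks, where .ok() unwraps a Right or short-circuits the whole block with the Left. The underlying technique is a boundary/break; here is a sketch built directly on scala.util.boundary (eitherScope and this ok() are hypothetical stand-ins, not ox's definitions):

    import scala.util.boundary, boundary.break

    def eitherScope[E, A](body: boundary.Label[Either[E, A]] ?=> A): Either[E, A] =
      boundary(Right(body))

    extension [E, A](e: Either[E, A])
      def ok()(using boundary.Label[Either[E, A]]): A = e match
        case Right(a)  => a
        case Left(err) => break(Left(err)) // short-circuit the enclosing eitherScope

    // usage: both parses must succeed, otherwise the first Left wins
    def sum(a: String, b: String): Either[String, Int] = eitherScope:
      a.toIntOption.toRight(s"not a number: $a").ok() +
        b.toIntOption.toRight(s"not a number: $b").ok()
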
1243[info] FlowIOOpsTest:
1244[info] asInputStream
1245[info] - should return an empty InputStream for an empty source
1246[info] - should return an InputStream for a simple source
1247[info] - should correctly track available bytes
1248[info] - should support bulk read operations with read(byte[])
1249[info] - should handle bulk read operations across multiple chunks
1250[info] - should handle bulk read with concatenated chunks (multiple backing arrays)
1251[info] - should handle read(byte[], offset, length) with various parameters
1252[info] - should handle edge cases for read(byte[], offset, length)
1253[info] - should throw appropriate exceptions for invalid read parameters
1254[info] - should maintain consistency between single-byte and bulk reads
1255[info] - should handle chunks with empty backing arrays
1256[info] - should handle flow with only empty chunks
1257[info] - should handle mixed empty and non-empty chunks in flow
1258[info] toOutputStream
1259[info] - should write a single chunk with bytes to an OutputStream
1260[info] - should write multiple chunks with bytes to an OutputStream
1261[info] - should write concatenated chunks to an OutputStream
1262[info] - should handle an empty Source
1263[info] - should close the OutputStream on write error
1264[info] - should close the OutputStream on error
1265[info] toFile
1266[info] - should open existing file and write a single chunk with bytes
1267[info] - should open existing file and write multiple chunks with bytes
1268[info] - should create file and write multiple chunks with bytes
1269[info] - should write concatenated chunks to a file
1270[info] - should use an existing file and overwrite it with a single chunk with bytes
1271[info] - should handle an empty source
1272[info] - should throw an exception on failing Source
1273[info] - should throw an exception if path is a directory
1274[info] - should throw an exception if file cannot be opened
1275[info] FlowOpsConcatPrependTest:
1276[info] concat
1277[info] - should concat other source
1278[info] prepend
1279[info] - should prepend other source
1280[info] FlowTextOpsTest:
1281[info] linesUtf8
1282[info] - should split a single chunk of bytes into lines
1283[info] - should split a single chunk of bytes into lines (multiple newlines)
1284[info] - should split a single chunk of bytes into lines (beginning with newline)
1285[info] - should split a single chunk of bytes into lines (ending with newline)
1286[info] - should split a single chunk of bytes into lines (empty array)
1287[info] - should split a multiple chunks of bytes into lines
1288[info] - should split a multiple chunks of bytes into lines (multiple newlines)
1289[info] - should split a multiple chunks of bytes into lines (multiple empty chunks)
1290[info] lines(charset)
1291zażółć
1292gęślą
1293jaźń
1294[info] - should decode lines with specified charset
1295[info] - should decode lines correctly across chunk boundaries
1296[info] decodeStringUtf8
1297[info] - should decode a simple string
1298[info] - should decode a chunked string with UTF-8 multi-byte characters
1299[info] - should handle an empty Source
1300[info] - should handle partial BOM
1301[info] - should handle a string shorter than BOM
1302[info] - should handle empty chunks
1303[info] encodeUtf8
1304[info] - should handle empty String
1305[info] - should encode a string
1306[info] FlowOpsZipWithIndexTest:
1307[info] zipWithIndex
1308[info] - should not zip anything from an empty flow
1309[info] - should zip flow with index
1310[2025-11-28T11:02:58.673693045Z] [24] allocate
1311[2025-11-28T11:02:58.675520448Z] [550] release 1
1312[info] ResourceTest:
1313[info] useInScope
1314[info] - should release resources after allocation
1315[2025-11-28T11:02:58.677585089Z] [24] allocate 1
1316[2025-11-28T11:02:58.678229631Z] [24] allocate 2
1317[2025-11-28T11:02:58.678379970Z] [551] release 2
1318[2025-11-28T11:02:58.678429102Z] [551] release 1
1319[info] - should release resources in reverse order
1320[2025-11-28T11:02:58.679978855Z] [24] allocate 1
1321[2025-11-28T11:02:58.680557371Z] [24] allocate 2
1322[2025-11-28T11:02:58.680715568Z] [552] release 2
1323[2025-11-28T11:02:58.680809473Z] [552] release 1
1324[2025-11-28T11:02:58.681554369Z] [24] exception
1325[info] - should release resources when there's an exception
1326[2025-11-28T11:02:58.682893850Z] [24] allocate 1
1327[2025-11-28T11:02:58.683431741Z] [24] allocate 2
1328[2025-11-28T11:02:58.683549933Z] [553] release 2
1329[2025-11-28T11:02:58.683678232Z] [553] release 1
1330[2025-11-28T11:02:58.683884576Z] [24] exception e2
1331[info] - should release resources when there's an exception during releasing (normal result)
1332[2025-11-28T11:02:58.685374467Z] [24] allocate 1
1333[2025-11-28T11:02:58.685936724Z] [24] allocate 2
1334[2025-11-28T11:02:58.686202240Z] [554] release 2
1335[2025-11-28T11:02:58.686333212Z] [554] release 1
1336[2025-11-28T11:02:58.686560988Z] [24] exception e3
1337[info] - should release resources when there's an exception during releasing (exceptional result)
1338[2025-11-28T11:02:58.687994734Z] [24] in scope
1339[2025-11-28T11:02:58.688131210Z] [555] release
1340[info] - should release registered resources
1341[2025-11-28T11:02:58.689370597Z] [24] allocate
1342[2025-11-28T11:02:58.689438874Z] [24] in scope
1343[2025-11-28T11:02:58.690228263Z] [556] release
1344[info] - should use a resource
1345[2025-11-28T11:02:58.691773149Z] [24] allocate
1346[2025-11-28T11:02:58.692108283Z] [24] in scope
1347[2025-11-28T11:02:58.692949158Z] [557] release
1348[info] - should use a closeable resource
1349[2025-11-28T11:02:58.694137500Z] [24] allocate
1350[2025-11-28T11:02:58.694316583Z] [24] in scope
1351[2025-11-28T11:02:58.695107095Z] [558] release
1352[2025-11-28T11:02:58.695745292Z] [24] exception e2 (e1)
1353[info] - should add suppressed exception when there's an exception during releasing
1354[info] FlowOpsMapParTest:
1355[info] mapPar
1356[info] - should map over a flow with parallelism limit 1
1357[info] - should map over a flow with parallelism limit 2
1358[info] - should map over a flow with parallelism limit 3
1359[info] - should map over a flow with parallelism limit 4
1360[info] - should map over a flow with parallelism limit 5
1361[info] - should map over a flow with parallelism limit 6
1362[info] - should map over a flow with parallelism limit 7
1363[info] - should map over a flow with parallelism limit 8
1364[info] - should map over a flow with parallelism limit 9
1365[info] - should map over a flow with parallelism limit 10
1366[info] - should map over a flow with parallelism limit 10 (stress test)
1367[info] + iteration 1
1368[info] + iteration 2
1369[info] + iteration 3
1370[info] + iteration 4
1371[info] + iteration 5
1372[info] + iteration 6
1373[info] + iteration 7
1374[info] + iteration 8
1375[info] + iteration 9
1376[info] + iteration 10
1377[info] + iteration 11
1378[info] + iteration 12
1379[info] + iteration 13
1380[info] + iteration 14
1381[info] + iteration 15
1382[info] + iteration 16
1383[info] + iteration 17
1384[info] + iteration 18
1385[info] + iteration 19
1386[info] + iteration 20
1387[info] + iteration 21
1388[info] + iteration 22
1389[info] + iteration 23
1390[info] + iteration 24
1391[info] + iteration 25
1392[info] + iteration 26
1393[info] + iteration 27
1394[info] + iteration 28
1395[info] + iteration 29
1396[info] + iteration 30
1397[info] + iteration 31
1398[info] + iteration 32
1399[info] + iteration 33
1400[info] + iteration 34
1401[info] + iteration 35
1402[info] + iteration 36
1403[info] + iteration 37
1404[info] + iteration 38
1405[info] + iteration 39
1406[info] + iteration 40
1407[info] + iteration 41
1408[info] + iteration 42
1409[info] + iteration 43
1410[info] + iteration 44
1411[info] + iteration 45
1412[info] + iteration 46
1413[info] + iteration 47
1414[info] + iteration 48
1415[info] + iteration 49
1416[info] + iteration 50
1417[info] + iteration 51
1418[info] + iteration 52
1419[info] + iteration 53
1420[info] + iteration 54
1421[info] + iteration 55
1422[info] + iteration 56
1423[info] + iteration 57
1424[info] + iteration 58
1425[info] + iteration 59
1426[info] + iteration 60
1427[info] + iteration 61
1428[info] + iteration 62
1429[info] + iteration 63
1430[info] + iteration 64
1431[info] + iteration 65
1432[info] + iteration 66
1433[info] + iteration 67
1434[info] + iteration 68
1435[info] + iteration 69
1436[info] + iteration 70
1437[info] + iteration 71
1438[info] + iteration 72
1439[info] + iteration 73
1440[info] + iteration 74
1441[info] + iteration 75
1442[info] + iteration 76
1443[info] + iteration 77
1444[info] + iteration 78
1445[info] + iteration 79
1446[info] + iteration 80
1447[info] + iteration 81
1448[info] + iteration 82
1449[info] + iteration 83
1450[info] + iteration 84
1451[info] + iteration 85
1452[info] + iteration 86
1453[info] + iteration 87
1454[info] + iteration 88
1455[info] + iteration 89
1456[info] + iteration 90
1457[info] + iteration 91
1458[info] + iteration 92
1459[info] + iteration 93
1460[info] + iteration 94
1461[info] + iteration 95
1462[info] + iteration 96
1463[info] + iteration 97
1464[info] + iteration 98
1465[info] + iteration 99
1466[info] + iteration 100
1467[info] - should propagate errors
1468[2025-11-28T11:03:07.298026053Z] [1917] done
1469[2025-11-28T11:03:07.298012243Z] [1916] done
1470[2025-11-28T11:03:07.398476805Z] [1919] exception
1471[info] - should cancel other running forks when there's an error
1472[info] - should handle empty flow
1473[info] - should handle flow with exactly parallelism number of elements
1474[info] - should handle flow with less than parallelism number of elements
1475[info] - should preserve order even with varying processing times
1476[info] - should preserve order with random processing times
1477[info] - should work with very high parallelism values
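
The mapPar results above confirm one key property at every parallelism level: elements are processed concurrently but emitted in input order. A minimal usage sketch, assuming the Flow.fromValues/mapPar/runToList names that appear elsewhere in this log:

    import ox.flow.Flow

    val doubled: List[Int] =
      Flow.fromValues((1 to 100)*) // source flow
        .mapPar(4)(i => i * 2)     // up to 4 concurrent workers
        .runToList()               // blocks until the flow completes

    assert(doubled == (1 to 100).map(_ * 2).toList) // order is preserved
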
1478[info] SelectOrClosedWithinTest:
1479[info] selectOrClosedWithin
1480[info] - should select a clause that can complete immediately
1481[info] - should return timeout when no clause can complete within the timeout
1482[info] - should select a source that has a value immediately
1483[info] - should return timeout when no source has a value within the timeout
1484[info] - should work with different timeout value types
1485[info] - should handle empty clauses sequence
1486[info] - should handle empty sources sequence
1487[info] selectOrClosedWithin with single clause
1488[info] - should complete when clause is ready
1489[info] - should timeout when clause is not ready
1490[info] selectOrClosedWithin with multiple clauses
1491[info] - should select the first ready clause
1492[info] - should timeout when no clauses are ready
1493[info] selectOrClosedWithin with sources
1494[info] - should select from ready source
1495[info] - should timeout when no sources are ready
1496[info] selectOrClosedWithin error scenarios
1497[info] - should handle channel closed with done
1498[info] - should handle channel closed with error
1499[info] - should prioritize ready channels over closed ones
1500[info] selectOrClosedWithin with different timeout types
1501[info] - should work with various timeout value types
1502[info] selectOrClosedWithin with sequences
1503[info] - should handle empty sequences
1504[info] - should handle sequence of clauses
1505[info] - should handle sequence of sources
1506[info] selectOrClosedWithin with various arities
1507[info] - should work with all supported clause counts
1508[info] - should work with all supported source counts
1509[info] FlowOpsSplitTest:
1510[info] split
1511[info] - should split an empty flow
1512[info] - should split a flow with no delimiters
1513[info] - should split a flow with delimiter at the beginning
1514[info] - should split a flow with delimiter at the end
1515[info] - should split a flow with delimiter in the middle
1516[info] - should split a flow with multiple delimiters
1517[info] - should split a flow with adjacent delimiters
1518[info] - should split a flow with only delimiters
1519[info] - should split a flow with single delimiter
1520[info] - should split a flow with single non-delimiter
1521[info] - should split a flow with multiple consecutive delimiters at the beginning
1522[info] - should split a flow with multiple consecutive delimiters at the end
1523[info] - should split a flow with string delimiters
1524[info] - should split a flow using complex predicate
1525[info] - should handle error propagation
1526[info] - should split a large flow efficiently
1527[info] JitterTest:
1528[info] Jitter
1529[info] - should use no jitter
1530[info] - should use full jitter
1531[info] - should use equal jitter
1532[info] - should use decorrelated jitter
1533[info] FlowOpsAlsoToTest:
1534[info] alsoTo
1535[info] - should send to both sinks
1536[info] - should send to both sinks and not hang when other sink is rendezvous channel
1537[info] - should close main flow when other closes
1538[info] - should close main flow with error when other errors
1539[info] - should close other channel with error when main errors
1540[info] FlowOpsBufferTest:
1541[info] buffer
1542[info] - should work with a single async boundary
1543[info] - should work with multiple async boundaries
1544[info] - should propagate errors
1545[info] BackoffRetryTest:
1546[info] Backoff retry
1547[info] - should retry a function
1548[info] - should retry a failing function forever
1549[info] - should respect maximum delay
1550[info] - should use jitter
1551[info] - should retry an Either
1552[info] FlowOpsEnsureTest:
1553[info] ensure.onComplete
1554[info] - should run in case of success
1555[info] - should run in case of error
1556[info] ensure.onDone
1557[info] - should run in case of success
1558[info] - should not run in case of error
1559[info] ensure.onError
1560[info] - should not run in case of success
1561[info] - should run in case of error
1562[info] FlowOpsTakeLastTest:
1563[info] takeLast
1564[info] - should throw ChannelClosedException.Error for source failed without exception
1565[info] - should fail to takeLast when n < 0
1566[info] - should return empty list for the empty source
1567[info] - should return empty list when n == 0 and list is not empty
1568[info] - should return list with all elements if the source is smaller than requested number
1569[info] - should return the last n elements from the source
1570[info] FlowOpsZipAllTest:
1571[info] zipAll
1572[info] - should not emit any element when both flows are empty
1573[info] - should emit this element when other flow is empty
1574[info] - should emit other element when this flow is empty
1575[info] - should emit matching elements when both flows are of the same size
1576[info] - should emit default for other flow if this flow is longer
1577[info] - should emit default for this flow if other flow is longer
1578[info] FlowPublisherTckTest:
1579[info] - required_createPublisher1MustProduceAStreamOfExactly1Element
1580[info] - required_createPublisher3MustProduceAStreamOfExactly3Elements
1581[info] - required_validate_maxElementsFromPublisher
1582[info] - required_validate_boundedDepthOfOnNextAndRequestRecursion
1583[info] - required_spec101_subscriptionRequestMustResultInTheCorrectNumberOfProducedElements
1584[info] - required_spec102_maySignalLessThanRequestedAndTerminateSubscription
1585[info] - stochastic_spec103_mustSignalOnMethodsSequentially
1586[info] - optional_spec104_mustSignalOnErrorWhenFails
1587[info] - required_spec105_mustSignalOnCompleteWhenFiniteStreamTerminates
1588[info] - optional_spec105_emptyStreamMustTerminateBySignallingOnComplete
1589[info] - required_spec107_mustNotEmitFurtherSignalsOnceOnCompleteHasBeenSignalled
1590[info] - untested_spec107_mustNotEmitFurtherSignalsOnceOnErrorHasBeenSignalled !!! IGNORED !!!
1591[info] - untested_spec109_subscribeShouldNotThrowNonFatalThrowable !!! IGNORED !!!
1592[info] - required_spec109_subscribeThrowNPEOnNullSubscriber
1593[info] - required_spec109_mustIssueOnSubscribeForNonNullSubscriber
1594[info] - required_spec109_mayRejectCallsToSubscribeIfPublisherIsUnableOrUnwillingToServeThemRejectionMustTriggerOnErrorAfterOnSubscribe
1595[info] - untested_spec110_rejectASubscriptionRequestIfTheSameSubscriberSubscribesTwice !!! IGNORED !!!
1596[info] - optional_spec111_maySupportMultiSubscribe
1597[info] - optional_spec111_registeredSubscribersMustReceiveOnNextOrOnCompleteSignals
1598[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingOneByOne
1599[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingManyUpfront
1600[info] - optional_spec111_multicast_mustProduceTheSameElementsInTheSameSequenceToAllOfItsSubscribersWhenRequestingManyUpfrontAndCompleteAsExpected
1601[info] - required_spec302_mustAllowSynchronousRequestCallsFromOnNextAndOnSubscribe
1602[info] - required_spec303_mustNotAllowUnboundedRecursion
1603[info] - untested_spec304_requestShouldNotPerformHeavyComputations !!! IGNORED !!!
1604[info] - untested_spec305_cancelMustNotSynchronouslyPerformHeavyComputation !!! IGNORED !!!
1605[info] - required_spec306_afterSubscriptionIsCancelledRequestMustBeNops
1606[info] - required_spec307_afterSubscriptionIsCancelledAdditionalCancelationsMustBeNops
1607[info] - required_spec309_requestZeroMustSignalIllegalArgumentException
1608[info] - required_spec309_requestNegativeNumberMustSignalIllegalArgumentException
1609[info] - required_spec312_cancelMustMakeThePublisherToEventuallyStopSignaling
1610[info] - required_spec313_cancelMustMakeThePublisherEventuallyDropAllReferencesToTheSubscriber
1611[info] - required_spec317_mustSupportAPendingElementCountUpToLongMaxValue
1612[info] - required_spec317_mustSupportACumulativePendingElementCountUpToLongMaxValue
1613[info] - required_spec317_mustNotSignalOnErrorWhenPendingAboveLongMaxValue
1614[info] - optional_spec309_requestNegativeNumberMaySignalIllegalArgumentExceptionWithSpecificMessage
1615[info] - untested_spec108_possiblyCanceledSubscriptionShouldNotReceiveOnErrorOrOnCompleteSignals !!! IGNORED !!!
1616[info] - untested_spec106_mustConsiderSubscriptionCancelledAfterOnErrorOrOnCompleteHasBeenCalled !!! IGNORED !!!
1617[info] SourceOpsFactoryMethodsTest:
1618[info] Source factory methods
1619[info] - should create a source from a fork
1620[info] CancelTest:
1621[info] cancel
1622[2025-11-28T11:03:15.976168195Z] [2410] started
1623[2025-11-28T11:03:16.076477866Z] [2410] interrupted
1624[2025-11-28T11:03:16.576887807Z] [2410] interrupted done
1625[2025-11-28T11:03:16.577165492Z] [2408] cancel done
1626[info] - should block until the fork completes
1627[2025-11-28T11:03:17.579917374Z] [2411] cancel done
1628[2025-11-28T11:03:17.681729644Z] [2416] interrupted
1629[2025-11-28T11:03:17.782074119Z] [2416] interrupted done
1630[2025-11-28T11:03:17.782261708Z] [2414] cancel done
1631[2025-11-28T11:03:17.882769657Z] [2417] cancel done
1632[2025-11-28T11:03:17.984410045Z] [2422] interrupted
1633[2025-11-28T11:03:18.084689741Z] [2422] interrupted done
1634[2025-11-28T11:03:18.084921458Z] [2420] cancel done
1635[2025-11-28T11:03:18.185475637Z] [2423] cancel done
1636[2025-11-28T11:03:18.287237016Z] [2428] interrupted
1637[2025-11-28T11:03:18.387563379Z] [2428] interrupted done
1638[2025-11-28T11:03:18.387733412Z] [2426] cancel done
1639[2025-11-28T11:03:18.488342833Z] [2429] cancel done
1640[2025-11-28T11:03:18.590087638Z] [2434] interrupted
1641[2025-11-28T11:03:18.690385096Z] [2434] interrupted done
1642[2025-11-28T11:03:18.690590696Z] [2432] cancel done
1643[2025-11-28T11:03:18.791167734Z] [2435] cancel done
1644[2025-11-28T11:03:18.893014807Z] [2440] interrupted
1645[2025-11-28T11:03:18.993335401Z] [2440] interrupted done
1646[2025-11-28T11:03:18.993530932Z] [2438] cancel done
1647[2025-11-28T11:03:19.094121849Z] [2441] cancel done
1648[2025-11-28T11:03:19.195897030Z] [2446] interrupted
1649[2025-11-28T11:03:19.296160784Z] [2446] interrupted done
1650[2025-11-28T11:03:19.296324462Z] [2444] cancel done
1651[2025-11-28T11:03:19.396963926Z] [2447] cancel done
1652[2025-11-28T11:03:19.498723450Z] [2452] interrupted
1653[2025-11-28T11:03:19.599045994Z] [2452] interrupted done
1654[2025-11-28T11:03:19.599214455Z] [2450] cancel done
1655[2025-11-28T11:03:19.699770524Z] [2453] cancel done
1656[2025-11-28T11:03:19.801504832Z] [2458] interrupted
1657[2025-11-28T11:03:19.901801361Z] [2458] interrupted done
1658[2025-11-28T11:03:19.901998027Z] [2456] cancel done
1659[2025-11-28T11:03:20.002550531Z] [2459] cancel done
1660[2025-11-28T11:03:20.104340182Z] [2464] interrupted
1661[2025-11-28T11:03:20.204673235Z] [2464] interrupted done
1662[2025-11-28T11:03:20.204885250Z] [2462] cancel done
1663[2025-11-28T11:03:20.305409119Z] [2465] cancel done
1664[2025-11-28T11:03:20.407099470Z] [2470] interrupted
1665[2025-11-28T11:03:20.507440240Z] [2470] interrupted done
1666[2025-11-28T11:03:20.507664069Z] [2468] cancel done
1667[info] - should block until the fork completes (stress test)
1668[info] + iteration 1
1669[info] + iteration 2
1670[info] + iteration 3
1671[info] + iteration 4
1672[info] + iteration 5
1673[info] + iteration 6
1674[info] + iteration 7
1675[info] + iteration 8
1676[info] + iteration 9
1677[info] + iteration 10
1678[info] + iteration 11
1679[info] + iteration 12
1680[info] + iteration 13
1681[info] + iteration 14
1682[info] + iteration 15
1683[info] + iteration 16
1684[info] + iteration 17
1685[info] + iteration 18
1686[info] + iteration 19
1687[info] + iteration 20
1688[info] cancelNow
1689[2025-11-28T11:03:20.712149573Z] [2471] cancel done
1690[2025-11-28T11:03:21.212469259Z] [2473] interrupted done
1691[info] - should return immediately, and wait for forks when scope completes
1692[info] - should (when followed by a joinEither) catch InterruptedException with which a fork ends
1693[info] FlowOpsTapTest:
1694[info] - should tap over a flow
1695[info] FlowOpsAlsoToTapTest:
1696[info] alsoToTap
1697[info] - should send to both sinks when other is faster
1698[info] - should send to both sinks when other is slower
1699[info] - should not fail the flow when the other sink fails
1700[info] - should not close the flow when the other sink closes
1701[info] SourceOpsFailedTest:
1702[info] Source.failed
1703[info] - should fail on receive
1704[info] - should be in error
1705[info] FlowOpsDebounceTest:
1706[info] debounce
1707[info] - should not debounce if applied on an empty flow
1708[info] - should not debounce if applied on a flow containing only distinct values
1709[info] - should debounce if applied on a flow containing only repeating values
1710[info] - should debounce if applied on a flow containing repeating elements
1711[info] FlowOpsThrottleTest:
1712[info] throttle
1713[info] - should not throttle the empty source
1714[info] - should throttle to specified elements per time units
1715[info] - should fail to throttle when elements <= 0
1716[info] - should fail to throttle when per lower than 1ms
1717[info] FlowOpsRunToChannelTest:
1718[info] runToChannel
1719[info] - should receive the elements in the flow
1720[info] - should return the original source when running a source-backed flow
1721[info] FlowOpsTimeoutTest:
1722[info] - should timeout
1723[info] FlowOpsZipTest:
1724[info] - should zip two sources
1725[info] FixedRateRepeatTest:
1726[info] repeat
1727[info] - should repeat a function at fixed rate
1728[info] - should repeat a function at fixed rate with initial delay
1729[info] - should repeat a function forever at fixed rate
1730[info] - should repeat a function forever at fixed rate with initial delay
1731[info] ForeachParTest:
1732[info] foreachPar
1733[2025-11-28T11:03:23.346093626Z] [2503] 1
1734[2025-11-28T11:03:23.346119638Z] [2504] 2
1735[2025-11-28T11:03:23.346143856Z] [2505] 3
1736[2025-11-28T11:03:23.346101002Z] [2502] 0
1737[2025-11-28T11:03:23.346182556Z] [2506] 4
1738[2025-11-28T11:03:23.446584945Z] [2508] 6
1739[2025-11-28T11:03:23.446561185Z] [2507] 5
1740[2025-11-28T11:03:23.446636438Z] [2510] 8
1741[2025-11-28T11:03:23.446856796Z] [2511] 9
1742[2025-11-28T11:03:23.446608105Z] [2509] 7
1743[2025-11-28T11:03:23.546900812Z] [2512] 10
1744[2025-11-28T11:03:23.546910295Z] [2513] 11
1745[2025-11-28T11:03:23.547122738Z] [2514] 12
1746[2025-11-28T11:03:23.547160612Z] [2515] 13
1747[2025-11-28T11:03:23.547215343Z] [2516] 14
1748[2025-11-28T11:03:23.647267746Z] [2518] 16
1749[2025-11-28T11:03:23.647260857Z] [2517] 15
1750[2025-11-28T11:03:23.647441449Z] [2519] 17
1751[info] - should run computations in parallel
1752[info] - should run no more computations than the limit
1753[2025-11-28T11:03:23.981923145Z] [2684] exception
1754[2025-11-28T11:03:23.982489994Z] [24] catch
1755[2025-11-28T11:03:24.282655759Z] [24] all done
1756[info] - should interrupt other computations if one fails
1757[info] FlowOpsFutureSourceTest:
1758[info] futureSource
1759[info] - should return the original future failure when future fails
1760[info] - should return future's source values
1761[info] SourceOpsTransformTest:
1762[info] Source.transform
1763[info] - should transform a source using a simple map
1764[info] - should transform a source using a complex chain of operations
1765[info] - should transform an infinite source
1766[info] - should transform an infinite source (stress test)
1767[info] RateLimiterTest:
1768[info] fixed rate RateLimiter
1769[info] - should drop operation when rate limit is exceeded
1770[info] - should restart rate limiter after given duration
1771[info] - should block operation when rate limit is exceeded
1772[info] - should respect time constraints when blocking
1773[info] - should respect time constraints when blocking concurrently
1774[info] - should allow running more long-running operations concurrently than the max rate when not considering operation time
1775[info] - should not allow running more long-running operations concurrently than the max rate when considering operation time
1776[info] sliding window RateLimiter
1777[info] - should drop operation when rate limit is exceeded
1778[info] - should restart rate limiter after given duration
1779[info] - should block operation when rate limit is exceeded
1780[info] - should respect time constraints when blocking
1781[info] - should respect time constraints when blocking concurrently
1782[info] - should not allow running more operations while operations are still running, when considering operation time
1783[info] - should not allow running more operations while operations are still running in the window span, when considering operation time
1784[info] bucket RateLimiter
1785[info] - should drop operation when rate limit is exceeded
1786[info] - should refill token after time elapsed from last refill and not before
1787[info] - should block operation when rate limit is exceeded
1788[info] - should respect time constraints when blocking
1789[info] - should respect time constraints when blocking concurrently
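
The bucket RateLimiter assertions above describe token-bucket behaviour: tokens refill only once the configured interval has elapsed, and an exhausted bucket either drops or blocks depending on the method used. A generic token-bucket sketch (illustrative, not ox's RateLimiter):

    final class TokenBucket(capacity: Int, refillEveryMillis: Long):
      private var tokens = capacity
      private var lastRefill = System.currentTimeMillis()

      /** Non-blocking acquire: true = run the operation, false = drop it. */
      def tryAcquire(): Boolean = synchronized {
        refill()
        if tokens > 0 then
          tokens -= 1
          true
        else false
      }

      private def refill(): Unit =
        val now = System.currentTimeMillis()
        val intervals = ((now - lastRefill) / refillEveryMillis).toInt
        if intervals > 0 then
          tokens = math.min(capacity, tokens + intervals)
          lastRefill += intervals * refillEveryMillis

A blocking variant would loop with a short sleep, or await a condition, instead of returning false.
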
1790[info] FlowOpsSplitOnTest:
1791[info] splitOn
1792[info] - should split an empty flow
1793[info] - should split a flow with no delimiters
1794[info] - should split a flow with single-element delimiter at the beginning
1795[info] - should split a flow with single-element delimiter at the end
1796[info] - should split a flow with single-element delimiter in the middle
1797[info] - should split a flow with multiple single-element delimiters
1798[info] - should split a flow with adjacent single-element delimiters
1799[info] - should split a flow with only single-element delimiters
1800[info] - should split a flow with multi-element delimiter at the beginning
1801[info] - should split a flow with multi-element delimiter at the end
1802[info] - should split a flow with multi-element delimiter in the middle
1803[info] - should split a flow with multiple multi-element delimiters
1804[info] - should split a flow with adjacent multi-element delimiters
1805[info] - should split a flow with only multi-element delimiters
1806[info] - should split a flow with overlapping patterns
1807[info] - should split a flow with complex overlapping patterns
1808[info] - should handle empty delimiter by returning entire input as single chunk
1809[info] - should handle empty delimiter with empty input
1810[info] - should split a flow with string elements
1811[info] - should split a flow with multi-element string delimiter
1812[info] - should handle delimiter longer than input
1813[info] - should handle single element matching start of multi-element delimiter
1814[info] - should handle partial delimiter match at end
1815[info] - should split with delimiter that appears multiple times in sequence
1816[info] - should handle error propagation
1817[info] - should split a large flow efficiently
1818[info] - should handle repeated delimiter pattern correctly
1819[info] - should properly split when given a flow with delimiter patterns
1820[info] - should handle erroneous scenarios when delimiter processing fails
1821[info] FlowOpsMergeTest:
1822[info] merge
1823[info] - should merge two simple flows
1824[info] - should merge two async flows
1825[info] - should merge with a tick flow
1826[info] - should propagate error from the left
1827[info] - should propagate error from the right
1828[info] - should merge two flows, emitting all elements from the left when right completes
1829[info] - should merge two flows, emitting all elements from the right when left completes
1830[info] - should merge two flows, completing the resulting flow when the left flow completes
1831[info] - should merge two flows, completing the resulting flow when the right flow completes
1832[info] FlowOpsFlatMapTest:
1833[info] flatMap
1834[info] - should flatten simple flows
1835[info] - should propagate errors
1836[info] FlowCompanionIOOpsTest:
1837[info] fromInputStream
1838[info] - should handle an empty InputStream
1839[info] - should handle InputStream shorter than buffer size
1840[info] - should handle InputStream longer than buffer size
1841[info] - should close the InputStream after reading it
1842[info] - should close the InputStream after failing with an exception
1843[info] fromFile
1844[info] - should read content from a file smaller than chunk size
1845[info] - should read content from a file larger than chunk size
1846[info] - should handle an empty file
1847[info] - should throw an exception for missing file
1848[info] - should throw an exception if path is a directory
1849[info] CollectParTest:
1850[info] collectPar
1851[info] - should output the same type as input
1852[info] - should run computations in parallel
1853[info] - should run no more computations than the limit
1854[2025-11-28T11:04:08.616120480Z] [5995] exception
1855[2025-11-28T11:04:08.616630031Z] [24] catch
1856[2025-11-28T11:04:08.916803679Z] [24] all done
1857[info] - should interrupt other computations if one fails
1858[info] FlowOpsFutureTest:
1859[info] future
1860[info] - should return the original future failure when future fails
1861[info] - should return future value
1862[info] FlowOpsInterleaveTest:
1863[info] interleave
1864[info] - should interleave with an empty source
1865[info] - should interleave two sources with default segment size
1866[info] - should interleave two sources with default segment size and different lengths
1867[info] - should interleave two sources with custom segment size
1868[info] - should interleave two sources with custom segment size and different lengths
1869[info] - should interleave two sources with different lengths and complete eagerly
1870[info] - should, when empty, interleave with a non-empty source and complete eagerly
1871[info] - should interleave with an empty source and complete eagerly
1872[info] ParTest:
1873[info] par
1874[2025-11-28T11:04:09.045108633Z] [6025] b
1875[2025-11-28T11:04:09.145062934Z] [6024] a
1876[2025-11-28T11:04:09.145449364Z] [24] done
1877[info] - should run computations in parallel
1878[2025-11-28T11:04:09.247155696Z] [6028] exception
1879[2025-11-28T11:04:09.247615042Z] [24] catch
1880[2025-11-28T11:04:09.547817153Z] [24] all done
1881[info] - should interrupt other computations if one fails
1882[info] parLimit
1883[info] - should run up to the given number of computations in parallel
1884[2025-11-28T11:04:10.254185869Z] [6041] x
1885[2025-11-28T11:04:10.254195303Z] [6040] x
1886[2025-11-28T11:04:10.264534766Z] [6043] exception
1887[2025-11-28T11:04:10.264938626Z] [24] catch
1888[2025-11-28T11:04:10.565137739Z] [24] all done
1889[info] - should interrupt other computations if one fails
1890[info] parEither
1891[2025-11-28T11:04:10.667123247Z] [6047] b
1892[2025-11-28T11:04:10.767117779Z] [6046] a
1893[2025-11-28T11:04:10.767755473Z] [24] done
1894[info] - should run computations in parallel
1895[2025-11-28T11:04:10.868967313Z] [6050] exception
1896[2025-11-28T11:04:11.169679281Z] [24] all done
1897[info] - should interrupt other computations if one fails
1898[info] SelectWithinTest:
1899[info] selectWithin
1900[info] - should select a clause that can complete immediately
1901[info] - should throw TimeoutException when no clause can complete within the timeout
1902[info] - should select a source that has a value immediately
1903[info] - should throw TimeoutException when no source has a value within the timeout
1904[info] - should work with single clause
1905[info] - should work with three clauses
1906[info] - should work with four clauses
1907[info] - should work with five clauses
1908[info] - should work with sequence of clauses
1909[info] selectWithin with sources
1910[info] - should work with single source
1911[info] - should work with two sources
1912[info] - should work with three sources
1913[info] - should work with four sources
1914[info] - should work with five sources
1915[info] - should work with sequence of sources
1916[info] selectWithin timeout scenarios
1917[info] - should throw TimeoutException for single clause timeout
1918[info] - should throw TimeoutException for single source timeout
1919[info] - should throw TimeoutException for sequence of clauses timeout
1920[info] - should throw TimeoutException for sequence of sources timeout
1921[info] - should throw TimeoutException immediately for empty sequence of clauses
1922[info] - should throw TimeoutException immediately for empty sequence of sources
1923[info] selectWithin error scenarios
1924[info] - should throw ChannelClosedException when channel is closed with done
1925[info] - should throw ChannelClosedException when channel is closed with error
1926[info] - should prioritize ready channels over closed ones
1927[info] selectWithin performance
1928[info] - should not timeout when clause can complete immediately
1929[info] - should respect timeout duration
1930[info] selectWithin with send clauses
1931[info] - should work with send clauses
1932[info] - should throw TimeoutException when send clauses cannot complete
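
Both select suites above revolve around ox's channel select: the first ready clause wins, and the ...Within variants bound the wait with a timeout (a TimeoutException for selectWithin, a timeout value for selectOrClosedWithin). A usage sketch, assuming ox's channel API names (Channel.buffered, receiveClause, Received), which may differ slightly between versions:

    import ox.*
    import ox.channels.*

    supervised {
      val numbers = Channel.buffered[Int](4)
      val words   = Channel.buffered[String](4)
      fork { words.send("hello") }

      // completes with whichever clause is ready first
      select(numbers.receiveClause, words.receiveClause) match
        case numbers.Received(n) => println(s"number: $n")
        case words.Received(w)   => println(s"word: $w")
    }
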
1933[info] ImmediateRepeatTest:
1934[info] repeat
1935[info] - should repeat a function immediately
1936[info] - should repeat a function immediately with initial delay
1937[info] - should repeat a function immediately forever
1938[info] - should repeat a function immediately forever with initial delay
1939[info] FlowOpsDebounceByTest:
1940[info] debounceBy
1941[info] - should not debounce if applied on an empty flow
1942[info] - should not debounce if applied on a flow containing only distinct f(value)
1943[info] - should debounce if applied on a flow containing repeating f(value)
1944[info] - should debounce subsequent odd/prime numbers
1945[info] RaceTest:
1946[info] timeout
1947[2025-11-28T11:04:12.550320811Z] [24] timeout
1948[2025-11-28T11:04:12.550447177Z] [24] done
1949[info] - should short-circuit a long computation
1950[2025-11-28T11:04:15.051995604Z] [6112] no timeout
1951[2025-11-28T11:04:15.052338914Z] [24] done
1952[info] - should not interrupt a short computation
1953[info] timeoutOption
1954[2025-11-28T11:04:18.054265665Z] [24] done: None
1955[info] - should short-circuit a long computation
1956[info] race
1957[2025-11-28T11:04:20.556377236Z] [6117] fast
1958[info] - should race a slower and faster computation
1959[2025-11-28T11:04:22.058896216Z] [6118] fast
1960[info] - should race a faster and slower computation
1961[2025-11-28T11:04:23.261188478Z] [6120] error
1962[2025-11-28T11:04:23.561194851Z] [6121] slow
1963[info] - should return the first successful computation to complete
1964[info] - should add other exceptions as suppressed
1965[info] - should treat ControlThrowable as a non-fatal exception
1966[info] - should immediately rethrow other fatal exceptions
1967[info] raceEither
1968[2025-11-28T11:04:25.168132376Z] [6131] error
1969[2025-11-28T11:04:25.468150081Z] [6132] slow
1970[info] - should return the first successful computation to complete
1971[info] raceResult
1972[info] - should immediately return when a normal exception occurs
1973[info] - should immediately return when a control exception occurs
1974[info] - should immediately return when a fatal exception occurs
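
RaceTest above pins down ox's race/timeout semantics: the first successful computation wins, the losers are interrupted, and losing failures are attached as suppressed. A usage sketch with the race and timeout combinators named in the output (durations from scala.concurrent.duration):

    import ox.*
    import scala.concurrent.duration.*

    // first successful computation wins; the slower one is interrupted
    val winner: String = race(
      { sleep(1.second); "slow" },
      { sleep(100.millis); "fast" }
    ) // == "fast"

    // interrupts the body and throws TimeoutException if it overruns
    val n: Int = timeout(1.second) {
      sleep(100.millis)
      42
    }
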
1975[info] SourceOpsFutureSourceTest:
1976[info] SourceOps.futureSource
1977[info] - should return the original future failure when future fails
1978[info] - should return the original future failure when future fails with ExecutionException
1979[info] - should return future's source values
1980[info] FilterParTest:
1981[info] filterPar
1982[info] - should output the same type as input
1983[info] - should run computations in parallel
1984[info] - should run no more computations than the limit
1985[2025-11-28T11:04:27.217608392Z] [6337] exception
1986[2025-11-28T11:04:27.218183022Z] [24] catch
1987[2025-11-28T11:04:27.518351274Z] [24] all done
1988[info] - should interrupt other computations if one fails
1989[info] SourceOpsForeachTest:
1990[info] Source.foreach
1991[info] - should iterate over a source
1992[info] - should iterate over a source using for-syntax
1993[info] - should convert source to a list
1994[info] FlowOpsEmptyTest:
1995[info] empty
1996[info] - should be empty
1997[info] SupervisedTest:
1998[info] supervised
1999[2025-11-28T11:04:27.626597620Z] [6343] b
2000[2025-11-28T11:04:27.726579596Z] [6342] a
2001[2025-11-28T11:04:27.726819726Z] [24] done
2002[info] - should wait until all forks complete
2003[2025-11-28T11:04:27.828481794Z] [6346] b
2004[2025-11-28T11:04:27.828756467Z] [24] done
2005[info] - should only wait until user forks complete
2006[2025-11-28T11:04:27.930390868Z] [6350] b
2007[2025-11-28T11:04:28.030938008Z] [24] done
2008[info] - should interrupt once any fork ends with an exception
2009[2025-11-28T11:04:28.233024419Z] [24] done
2010[info] - should interrupt main body once a fork ends with an exception
2011[2025-11-28T11:04:28.334747898Z] [6356] b
2012[2025-11-28T11:04:28.534466059Z] [6354] a
2013[2025-11-28T11:04:28.534674004Z] [24] done
2014[info] - should not interrupt if an unsupervised fork ends with an exception
2015[info] - should handle interruption of multiple forks with `joinEither` correctly
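
The SupervisedTest cases above correspond to ox's structured-concurrency scopes. A minimal sketch, with supervised, fork and join per the ox docs:

  import ox.*
  import scala.concurrent.duration.*

  @main def supervisedDemo(): Unit =
    val result = supervised {
      // the scope ends only once all supervised forks complete;
      // if any fork throws, the remaining ones are interrupted
      val a = fork { sleep(200.millis); "a" }
      val b = fork { sleep(100.millis); "b" }
      a.join() + b.join()
    }
    println(result) // ab
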
2016[info] SourceOpsFutureTest:
2017[info] Source.future
2018[info] - should return the original future failure when future fails
2019[info] - should return the original future failure when future fails with ExecutionException
2020[info] - should return future value
2021[info] CircuitBreakerStateMachineTest:
2022[info] Circuit Breaker state machine
2023[info] - should keep closed with healthy metrics
2024[info] - should go to open after surpassing failure threshold
2025[info] - should go straight to half open after surpassing failure threshold with defined waitDurationOpenState = 0
2026[info] - should go back to open after timeout in half open passed
2027[info] - should update counter of completed operations in halfOpen state
2028[info] - should go back to closed after enough calls with good metrics are recorded
2029[info] - should go to open after enough calls with bad metrics are recorded in halfOpen state
2030[info] - should go to closed after enough calls with good metrics are recorded in halfOpen state
2031[info] - should go to half open after waitDurationOpenState passes
2032[info] OxAppTest:
2033[info] OxApp
2034[info] - should work in happy case
2035[info] OxApp
2036Clean shutdown timed out after 100 milliseconds, exiting.
2037[info] - should shutdown despite cleanup taking a long time
2038[info] OxApp
2039[info] - should work in interrupted case
2040[info] OxApp
2041[info] - should work in failed case
2042[info] OxApp
2043[info] - should report any non-interrupted exceptions that occur during shutdown
2044[info] OxApp.Simple
2045[info] - should work in happy case
2046[info] OxApp.Simple
2047[info] - should work in interrupted case
2048[info] OxApp.Simple
2049[info] - should work in failed case
2050[info] OxApp.WithErrors
2051[info] - should work in happy case
2052[info] OxApp.WithErrors
2053[info] - should work in interrupted case
2054[info] OxApp.WithErrors
2055[info] - should work in failed case
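
The OxApp variants tested above wrap a main method in a supervised scope with interruption and shutdown handling. A minimal happy-path sketch; the run signature follows the ox docs, and OxApp.Simple / OxApp.WithErrors differ mainly in that signature:

  import ox.*

  object HelloApp extends OxApp:
    def run(args: Vector[String])(using Ox): ExitCode =
      println(s"args: ${args.mkString(",")}")
      ExitCode.Success
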
2056[info] FlowOpsMapStatefulConcatTest:
2057[info] mapStatefulConcat
2058[info] - should deduplicate
2059[info] - should count consecutive
2060[info] - should propagate errors in the mapping function
2061[info] - should propagate errors in the completion callback
2062[info] FlowOpsDropTest:
2063[info] drop
2064[info] - should not drop from the empty flow
2065[info] - should drop elements from the source
2066[info] - should return empty source when more elements than source length were dropped
2067[info] - should not drop when 'n == 0'
2068[info] FlowOpsRepeatEvalTest:
2069[info] repeatEval
2070[info] - should evaluate the element before each send
2071[info] - should evaluate the element before each send, as long as it's defined
2072[info] FlowPublisherPekkoTest:
2073[INFO] [11/28/2025 12:04:29.301] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2074[info] - a simple flow should emit elements to be processed by a pekko stream
2075[INFO] [11/28/2025 12:04:29.556] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2076[info] - a concurrent flow should emit elements to be processed by a pekko stream
2077[INFO] [11/28/2025 12:04:29.587] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2078[info] - create a flow from a simple publisher
2079[INFO] [11/28/2025 12:04:29.872] [] [CoordinatedShutdown(pekko://test)] Running CoordinatedShutdown with reason [ActorSystemTerminateReason]
2080[info] - create a flow from a concurrent publisher
2081[info] ImmediateRetryTest:
2082[info] Immediate retry
2083[info] - should retry a succeeding function
2084[info] - should fail fast when a function is not worth retrying
2085[info] - should retry a succeeding function with a custom success condition
2086[info] - should retry a failing function
2087[info] - should retry a failing function forever
2088[info] - should retry a succeeding Either
2089[info] - should fail fast when an Either is not worth retrying
2090[info] - should retry a succeeding Either with a custom success condition
2091[info] - should retry a failing Either
2092[info] Adaptive retry with immediate config
2093[info] - should retry a failing adaptive
2094[info] - should stop retrying after emptying bucket
2095[info] - should not pay exceptionCost if result T is going to be retried and shouldPayPenaltyCost returns false
2096[info] FlowOpsGroupByTest:
2097[info] groupBy
2098[info] - should handle empty flow
2099[info] - should handle single-element flow
2100[info] - should handle single-element flow (stress test)
2101[info] - should create simple groups without reaching parallelism limit
2102[info] - should complete groups when the parallelism limit is reached
2103[info] - should not exceed the parallelism limit, completing earliest-active child flows as done when necessary
2104[info] - should handle large flows
2105[info] - should handle non-integer grouping keys
2106[info] - should group when child processing is slow
2107[info] - should propagate errors from child flows
2108[info] - should propagate errors from child flows when the parent is blocked on sending
2109[info] - should propagate RuntimeException errors from parent flows
2110[info] - should throw an IllegalStateException when a child stream is completed by user-provided transformation
2111[info] FlowOpsLastOptionTest:
2112[info] lastOption
2113[info] - should return None for the empty flow
2114[info] - should return Some for a non-empty flow
2115[info] - should throw ChannelClosedException.Error with exception and message that was thrown during retrieval
2116[info] FlowOpsMapParUnorderedTest:
2117[info] mapParUnordered
2118[info] - should map over a source with parallelism limit 1
2119[info] - should map over a source with parallelism limit 2
2120[info] - should map over a source with parallelism limit 3
2121[info] - should map over a source with parallelism limit 4
2122[info] - should map over a source with parallelism limit 5
2123[info] - should map over a source with parallelism limit 6
2124[info] - should map over a source with parallelism limit 7
2125[info] - should map over a source with parallelism limit 8
2126[info] - should map over a source with parallelism limit 9
2127[info] - should map over a source with parallelism limit 10
2128[info] - should map over a source with parallelism limit 10 (stress test)
2129[info] + iteration 1
2130[info] + iteration 2
2131[info] + iteration 3
2132[info] + iteration 4
2133[info] + iteration 5
2134[info] + iteration 6
2135[info] + iteration 7
2136[info] + iteration 8
2137[info] + iteration 9
2138[info] + iteration 10
2139[info] + iteration 11
2140[info] + iteration 12
2141[info] + iteration 13
2142[info] + iteration 14
2143[info] + iteration 15
2144[info] + iteration 16
2145[info] + iteration 17
2146[info] + iteration 18
2147[info] + iteration 19
2148[info] + iteration 20
2149[info] + iteration 21
2150[info] + iteration 22
2151[info] + iteration 23
2152[info] + iteration 24
2153[info] + iteration 25
2154[info] + iteration 26
2155[info] + iteration 27
2156[info] + iteration 28
2157[info] + iteration 29
2158[info] + iteration 30
2159[info] + iteration 31
2160[info] + iteration 32
2161[info] + iteration 33
2162[info] + iteration 34
2163[info] + iteration 35
2164[info] + iteration 36
2165[info] + iteration 37
2166[info] + iteration 38
2167[info] + iteration 39
2168[info] + iteration 40
2169[info] + iteration 41
2170[info] + iteration 42
2171[info] + iteration 43
2172[info] + iteration 44
2173[info] + iteration 45
2174[info] + iteration 46
2175[info] + iteration 47
2176[info] + iteration 48
2177[info] + iteration 49
2178[info] + iteration 50
2179[info] + iteration 51
2180[info] + iteration 52
2181[info] + iteration 53
2182[info] + iteration 54
2183[info] + iteration 55
2184[info] + iteration 56
2185[info] + iteration 57
2186[info] + iteration 58
2187[info] + iteration 59
2188[info] + iteration 60
2189[info] + iteration 61
2190[info] + iteration 62
2191[info] + iteration 63
2192[info] + iteration 64
2193[info] + iteration 65
2194[info] + iteration 66
2195[info] + iteration 67
2196[info] + iteration 68
2197[info] + iteration 69
2198[info] + iteration 70
2199[info] + iteration 71
2200[info] + iteration 72
2201[info] + iteration 73
2202[info] + iteration 74
2203[info] + iteration 75
2204[info] + iteration 76
2205[info] + iteration 77
2206[info] + iteration 78
2207[info] + iteration 79
2208[info] + iteration 80
2209[info] + iteration 81
2210[info] + iteration 82
2211[info] + iteration 83
2212[info] + iteration 84
2213[info] + iteration 85
2214[info] + iteration 86
2215[info] + iteration 87
2216[info] + iteration 88
2217[info] + iteration 89
2218[info] + iteration 90
2219[info] + iteration 91
2220[info] + iteration 92
2221[info] + iteration 93
2222[info] + iteration 94
2223[info] + iteration 95
2224[info] + iteration 96
2225[info] + iteration 97
2226[info] + iteration 98
2227[info] + iteration 99
2228[info] + iteration 100
2229[info] - should propagate errors
2230[2025-11-28T11:04:45.515360091Z] [208015] done
2231[2025-11-28T11:04:45.515359998Z] [208016] done
2232[2025-11-28T11:04:45.615860609Z] [208018] exception
2233[info] - should complete running forks and not start new ones when the mapping function fails
2234[2025-11-28T11:04:45.918703408Z] [208022] 1
2235[2025-11-28T11:04:45.918710599Z] [208023] 2
2236[info] - should complete running forks and not start new ones when the upstream fails
2237[2025-11-28T11:04:46.332001733Z] [208030] done
2238[2025-11-28T11:04:46.331991542Z] [208029] done
2239[2025-11-28T11:04:46.432672315Z] [208032] exception
2240[info] - should cancel running forks when the surrounding scope closes due to an error
2241[info] - should emit downstream as soon as a value is ready, regardless of the incoming order
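
mapParUnordered, exercised at parallelism 1..10 above, runs up to `parallelism` mapping functions concurrently and emits results as they complete rather than in input order. A minimal sketch; the parameter shape is assumed from the ox Flow API:

  import ox.flow.Flow
  import ox.sleep
  import scala.concurrent.duration.*

  @main def mapParUnorderedDemo(): Unit =
    val result = Flow
      .fromValues(30, 10, 20)
      .mapParUnordered(3)(n => { sleep(n.millis); n })
      .runToList()
    println(result) // likely List(10, 20, 30): completion order, not input order
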
2242[info] ControlTest:
2243[info] timeout
2244[2025-11-28T11:04:47.940545070Z] [24] timeout
2245[2025-11-28T11:04:47.940679832Z] [24] done
2246[info] - should short-circuit a long computation
2247[info] - should pass through the exception of failed computation
2248[2025-11-28T11:04:50.042999098Z] [208046] no timeout
2249[2025-11-28T11:04:50.043328311Z] [24] done
2250[info] - should not interrupt a short computation
2251[2025-11-28T11:04:52.444866180Z] [208048] done
2252[info] - should block a thread indefinitely
2253[info] timeoutOption
2254[info] - should pass through the exception of failed computation
2255[info] timeoutEither
2256[info] - should pass through the exception of failed computation
2257[info] FlowOpsConcatTest:
2258[info] - should concatenate flows
2259[info] - should concatenate flows using ++
2260[info] - should not evaluate subsequent flows if there's a failure
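
Flow concatenation as tested above, in a minimal sketch using ++ (per the test names, subsequent flows are only evaluated once the previous ones complete, and not at all after a failure):

  import ox.flow.Flow

  @main def concatDemo(): Unit =
    val result = (Flow.fromValues(1, 2) ++ Flow.fromValues(3, 4)).runToList()
    println(result) // List(1, 2, 3, 4)
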
2261[info] FlowOpsFactoryMethodsTest:
2262[info] factory methods
2263[info] - should create a flow from a fork
2264[info] - should create an iterating flow
2265[info] - should unfold a function
2266[info] - should produce a range
2267[info] ChannelTest:
2268[info] channel with capacity 0
2269[info] - should send and receive two spaced elements
2270[info] - should send and receive many elements, with concurrent senders & receivers
2271[info] - should select from two receives, if the last one has elements
2272[info] - should select from three receives, if the last one has elements
2273[info] - should select a receive from multiple channels
2274[info] - should select a receive until all channels are done
2275[info] - should properly report channel state
2276[info] - should select from a non-done channel, if a value is immediately available
2277[info] - should select a done channel, when the channel is done immediately
2278[info] - should select a done channel, when the channel becomes done
2279[info] channel with capacity 1
2280[info] - should send and receive two spaced elements
2281[info] - should send and receive many elements, with concurrent senders & receivers
2282[info] - should select from two receives, if the last one has elements
2283[info] - should select from three receives, if the last one has elements
2284[info] - should select a receive from multiple channels
2285[info] - should select a receive until all channels are done
2286[info] - should properly report channel state
2287[info] - should select from a non-done channel, if a value is immediately available
2288[info] - should select a done channel, when the channel is done immediately
2289[info] - should select a done channel, when the channel becomes done
2290[info] channel with capacity 2
2291[info] - should send and receive two spaced elements
2292[info] - should send and receive many elements, with concurrent senders & receivers
2293[info] - should select from two receives, if the last one has elements
2294[info] - should select from three receives, if the last one has elements
2295[info] - should select a receive from multiple channels
2296[info] - should select a receive until all channels are done
2297[info] - should properly report channel state
2298[info] - should select from a non-done channel, if a value is immediately available
2299[info] - should select a done channel, when the channel is done immediately
2300[info] - should select a done channel, when the channel becomes done
2301[info] channel with capacity 100
2302[info] - should send and receive two spaced elements
2303[info] - should send and receive many elements, with concurrent senders & receivers
2304[info] - should select from two receives, if the last one has elements
2305[info] - should select from three receives, if the last one has elements
2306[info] - should select a receive from multiple channels
2307[info] - should select a receive until all channels are done
2308[info] - should properly report channel state
2309[info] - should select from a non-done channel, if a value is immediately available
2310[info] - should select a done channel, when the channel is done immediately
2311[info] - should select a done channel, when the channel becomes done
2312[info] channel with capacity 10000
2313[info] - should send and receive two spaced elements
2314[info] - should send and receive many elements, with concurrent senders & receivers
2315[info] - should select from two receives, if the last one has elements
2316[info] - should select from three receives, if the last one has elements
2317[info] - should select a receive from multiple channels
2318[info] - should select a receive until all channels are done
2319[info] - should properly report channel state
2320[info] - should select from a non-done channel, if a value is immediately available
2321[info] - should select a done channel, when the channel is done immediately
2322[info] - should select a done channel, when the channel becomes done
2323[info] buffered channel
2324[info] - should select a send when one is available
2325[info] channel
2326[info] - should receive from a channel until done
2327[info] - should not receive from a channel in case of an error
2328[info] rendezvous channel
2329[info] - should wait until elements are transmitted
2330[info] - should select a send when a receive is waiting
2331[info] - should select a send or receive depending on availability
2332[info] default
2333[info] - should use the default value if the clauses are not satisfiable
2334[info] - should not use the default value if a clause is satisfiable
2335[info] - should not use the default value if the channel is done
2336[info] - should use the default value once a source is done (buffered channel, stress test)
2337[info] + iteration 1
2338[info] + iteration 2
2339[info] + iteration 3
2340[info] + iteration 4
2341[info] + iteration 5
2342[info] + iteration 6
2343[info] + iteration 7
2344[info] + iteration 8
2345[info] + iteration 9
2346[info] + iteration 10
2347[info] + iteration 11
2348[info] + iteration 12
2349[info] + iteration 13
2350[info] + iteration 14
2351[info] + iteration 15
2352[info] + iteration 16
2353[info] + iteration 17
2354[info] + iteration 18
2355[info] + iteration 19
2356[info] + iteration 20
2357[info] + iteration 21
2358[info] + iteration 22
2359[info] + iteration 23
2360[info] + iteration 24
2361[info] + iteration 25
2362[info] + iteration 26
2363[info] + iteration 27
2364[info] + iteration 28
2365[info] + iteration 29
2366[info] + iteration 30
2367[info] + iteration 31
2368[info] + iteration 32
2369[info] + iteration 33
2370[info] + iteration 34
2371[info] + iteration 35
2372[info] + iteration 36
2373[info] + iteration 37
2374[info] + iteration 38
2375[info] + iteration 39
2376[info] + iteration 40
2377[info] + iteration 41
2378[info] + iteration 42
2379[info] + iteration 43
2380[info] + iteration 44
2381[info] + iteration 45
2382[info] + iteration 46
2383[info] + iteration 47
2384[info] + iteration 48
2385[info] + iteration 49
2386[info] + iteration 50
2387[info] + iteration 51
2388[info] + iteration 52
2389[info] + iteration 53
2390[info] + iteration 54
2391[info] + iteration 55
2392[info] + iteration 56
2393[info] + iteration 57
2394[info] + iteration 58
2395[info] + iteration 59
2396[info] + iteration 60
2397[info] + iteration 61
2398[info] + iteration 62
2399[info] + iteration 63
2400[info] + iteration 64
2401[info] + iteration 65
2402[info] + iteration 66
2403[info] + iteration 67
2404[info] + iteration 68
2405[info] + iteration 69
2406[info] + iteration 70
2407[info] + iteration 71
2408[info] + iteration 72
2409[info] + iteration 73
2410[info] + iteration 74
2411[info] + iteration 75
2412[info] + iteration 76
2413[info] + iteration 77
2414[info] + iteration 78
2415[info] + iteration 79
2416[info] + iteration 80
2417[info] + iteration 81
2418[info] + iteration 82
2419[info] + iteration 83
2420[info] + iteration 84
2421[info] + iteration 85
2422[info] + iteration 86
2423[info] + iteration 87
2424[info] + iteration 88
2425[info] + iteration 89
2426[info] + iteration 90
2427[info] + iteration 91
2428[info] + iteration 92
2429[info] + iteration 93
2430[info] + iteration 94
2431[info] + iteration 95
2432[info] + iteration 96
2433[info] + iteration 97
2434[info] + iteration 98
2435[info] + iteration 99
2436[info] + iteration 100
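
ChannelTest above runs the same suite against capacities 0 (rendezvous), 1, 2, 100 and 10000. The basics look like this; a minimal sketch with Channel.buffered, send, receive and done per the ox docs (the select clauses exercised by the suite are omitted here):

  import ox.*
  import ox.channels.*

  @main def channelDemo(): Unit =
    supervised {
      val c = Channel.buffered[Int](2) // Channel.rendezvous[Int] for capacity 0
      fork { c.send(1); c.send(2); c.done() }
      println(c.receive()) // 1
      println(c.receive()) // 2
    }
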
2437[info] FlowOpsOrElseTest:
2438[info] orElse
2439[info] - should emit elements only from the original source when it is not empty
2440[info] - should emit elements only from the alternative source when the original source is created empty
2441[info] - should emit elements only from the alternative source when the original source is empty
2442[info] - should return failed source when the original source is failed
2443[info] FlowOpsForeachTest:
2444[info] foreach
2445[info] - should iterate over a flow
2446[info] - should convert flow to a list
2447[info] SourceOpsEmptyTest:
2448[info] Source.empty
2449[info] - should be done
2450[info] - should be empty
2451[info] FlowOpsFlattenParTest:
2452[info] flattenPar
2453[info] - should pipe all elements of the child flows into the output flow
2454[info] - should handle empty flow
2455[info] - should handle singleton flow
2456[info] - should not flatten nested flows
2457[info] - should handle subsequent flatten calls
2458[info] - should run at most parallelism child flows
2459[info] - should pipe elements in real time
2460[info] - should propagate error of any of the child flows and stop piping
2461[info] - should propagate error of the parent flow and stop piping
2462[info] FlowOpsRetryTest:
2463[info] Flow.retry
2464[info] - should successfully run a flow without retries when no errors occur
2465[info] - should retry a failing flow with immediate schedule
2466[info] - should retry a failing flow with fixed interval schedule
2467[info] - should not retry a flow which fails downstream
2468[info] - should fail after exhausting all retry attempts
2469[info] - should use custom ResultPolicy to determine retry worthiness
2470[info] - should handle empty flows correctly
2471[info] - should handle flows that complete successfully on first attempt
2472[info] - should retry the entire flow when processing fails
2473[info] - should work with complex flows containing transformations
2474[info] - should not retry a flow which uses .take and control exceptions
2475[info] LocalTest:
2476[info] fork locals
2477[2025-11-28T11:04:56.721692832Z] [24] main mid
2478[2025-11-28T11:04:56.822544432Z] [313241] In f1 = x
2479[2025-11-28T11:04:56.822728563Z] [24] result = a
2480[2025-11-28T11:04:56.922704883Z] [313243] In f3 = z
2481[2025-11-28T11:04:56.922896728Z] [24] result = a
2482[info] - should properly propagate values using supervisedWhere
2483[2025-11-28T11:04:56.924307040Z] [24] main mid
2484[2025-11-28T11:04:57.025160379Z] [313244] In f1 = x
2485[2025-11-28T11:04:57.025326138Z] [24] result = a
2486[2025-11-28T11:04:57.125740163Z] [313246] In f3 = z
2487[2025-11-28T11:04:57.125994783Z] [24] result = a
2488[info] - should properly propagate values using unsupervisedWhere
2489[2025-11-28T11:04:57.127721424Z] [313248] nested1 = x
2490[2025-11-28T11:04:57.128181421Z] [313249] nested2 = x
2491[info] - should propagate values across multiple scopes
2492[2025-11-28T11:04:57.128347840Z] [24] outer = a
2493[info] - should propagate errors from forks created within local values
2494[2025-11-28T11:04:57.129950974Z] [24] v1
2495[2025-11-28T11:04:57.130289695Z] [24] v2
2496[info] - should correctly set & unset fork locals when an exception is thrown
2497[2025-11-28T11:04:57.130395492Z] [24] RuntimeException
2498[2025-11-28T11:04:57.130455824Z] [24] v1
2499[2025-11-28T11:04:57.130883691Z] [24] v1_1
2500[2025-11-28T11:04:57.130945166Z] [24] v2_1
2501[2025-11-28T11:04:57.131327899Z] [24] v1_2
2502[2025-11-28T11:04:57.131388632Z] [24] v2_2
2503[2025-11-28T11:04:57.131426943Z] [24] v1_1
2504[2025-11-28T11:04:57.131470504Z] [24] v2_1
2505[info] - should correctly set & unset multiple fork locals
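
The fork-local values tested above bind a value for the duration of a nested scope, including forks created inside it. A minimal sketch; ForkLocal, get and supervisedWhere are taken from the test names and the ox docs:

  import ox.*

  @main def forkLocalDemo(): Unit =
    val v = ForkLocal("a")
    supervised {
      println(v.get()) // a
      v.supervisedWhere("x") {
        // the bound value is visible in the nested scope and its forks
        fork { println(v.get()) }.join() // x
      }
      println(v.get()) // back to a
    }
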
2506[info] FlowOpsSampleTest:
2507[info] sample
2508[info] - should not sample anything from an empty flow
2509[info] - should not sample anything when 'n == 0'
2510[info] - should sample every element of the flow when 'n == 1'
2511[info] - should sample every nth element of the flow
2512[info] FlowOpsDrainTest:
2513[info] drain
2514[info] - should drain all elements
2515[info] - should run any side-effects that are part of the flow
2516[info] - should merge with another flow
2517[info] ActorTest:
2518[info] - should invoke methods on the actor
2519[info] - should protect the internal state of the actor
2520[info] - should run the close callback before re-throwing the exception
2521[info] - should end the scope when an exception is thrown when handling .tell
2522[info] - should throw a channel closed exception when the actor's scope becomes closed
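
ActorTest exercises ox's actor wrapper around mutable state. A minimal sketch, with Actor.create and ask assumed per the ox docs; Counter is a made-up example class:

  import ox.*
  import ox.channels.*

  class Counter:
    private var n = 0
    def increment(): Int = { n += 1; n }

  @main def actorDemo(): Unit =
    supervised {
      val ref = Actor.create(Counter())
      // calls are serialized on the actor's fork, protecting the internal state
      println(ref.ask(_.increment())) // 1
      println(ref.ask(_.increment())) // 2
    }
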
2523[info] FlowOpsSlidingTest:
2524[info] sliding
2525[info] - should create sliding windows for n = 2 and step = 1
2526[info] - should create sliding windows for n = 3 and step = 1
2527[info] - should create sliding windows for n = 2 and step = 2
2528[info] - should create sliding windows for n = 3 and step = 2
2529[info] - should create sliding windows for n = 1 and step = 2
2530[info] - should create sliding windows for n = 2 and step = 3
2531[info] - should create sliding windows for n = 2 and step = 3 (with 1 element remaining in the end)
2532[info] - should return failed source when the original source is failed
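
The sliding windows above, in a minimal sketch assuming a sliding(n, step) operator on Flow as the test names indicate:

  import ox.flow.Flow

  @main def slidingDemo(): Unit =
    // windows of size 2, advancing by 1 element
    val result = Flow.fromValues(1, 2, 3, 4).sliding(2, 1).runToList()
    println(result) // windows (1,2), (2,3), (3,4)
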
2533[info] FlowOpsTickTest:
2534[info] - should tick regularly
2535[info] - should tick immediately in case of a slow consumer, and then resume normally
2536Starting build for ProjectRef(file:/build/repo/,cron) (cron)... [3/6]
2537Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2538[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.8.0-RC2/classes ...
2539[info] done compiling
2540[info] compiling 1 Scala source to /build/repo/cron/target/scala-3.8.0-RC2/test-classes ...
2541[info] done compiling
2542[info] CronScheduleTest:
2543[info] repeat with cron schedule
2544[info] - should repeat a function every second (once)
2545[info] - should repeat a function every second (three times)
2546[info] - should provide initial delay
2547Starting build for ProjectRef(file:/build/repo/,otelContext) (otel-context)... [4/6]
2548Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2549[info] compiling 1 Scala source to /build/repo/otel-context/target/scala-3.8.0-RC2/classes ...
2550[info] done compiling
2551Starting build for ProjectRef(file:/build/repo/,kafka) (kafka)... [5/6]
2552Compile scalacOptions: -encoding, utf8, -unchecked, -language:experimental.macros, -language:higherKinds, -language:implicitConversions, -Xkind-projector, -Wvalue-discard, -Wnonunit-statement, -Wunused:implicits, -Wunused:explicits, -Wunused:imports, -Wunused:locals, -Wunused:params, -Wunused:privates, -Wconf:msg=can be rewritten automatically under:s, -source:3.8
2553[info] compiling 9 Scala sources to /build/repo/kafka/target/scala-3.8.0-RC2/classes ...
2554[warn] -- [E198] Unused Symbol Warning: /build/repo/kafka/src/main/scala/ox/kafka/KafkaConsumerWrapper.scala:45:14
2555[warn] 45 | def close(wrapper: KafkaConsumerWrapper[K, V]): Unit = if closeWhenComplete then
2556[warn] | ^^^^^^^
2557[warn] | unused explicit parameter
2558[warn] one warning found
2559[info] done compiling
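
The E198 unused-parameter warning above is produced by -Wunused:explicits. When the parameter must stay (e.g. to satisfy an expected shape), the standard-library scala.annotation.unused silences it; a generic sketch, not the actual KafkaConsumerWrapper code:

  import scala.annotation.unused

  // keeps the parameter for API compatibility while silencing -Wunused:explicits
  def close(@unused wrapper: String): Unit =
    println("closing")
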
2560[info] compiling 6 Scala sources to /build/repo/kafka/target/scala-3.8.0-RC2/test-classes ...
2561[info] done compiling
256212:05:09.155 [pool-67-thread-1] INFO o.a.k.c.c.AbstractConfig - KafkaConfig values:
2563 add.partitions.to.txn.retry.backoff.max.ms = 100
2564 add.partitions.to.txn.retry.backoff.ms = 20
2565 advertised.listeners = BROKER://localhost:6001
2566 alter.config.policy.class.name = null
2567 alter.log.dirs.replication.quota.window.num = 11
2568 alter.log.dirs.replication.quota.window.size.seconds = 1
2569 authorizer.class.name =
2570 auto.create.topics.enable = true
2571 auto.leader.rebalance.enable = true
2572 background.threads = 10
2573 broker.heartbeat.interval.ms = 2000
2574 broker.id = 0
2575 broker.rack = null
2576 broker.session.timeout.ms = 9000
2577 client.quota.callback.class = null
2578 compression.gzip.level = -1
2579 compression.lz4.level = 9
2580 compression.type = producer
2581 compression.zstd.level = 3
2582 connection.failed.authentication.delay.ms = 100
2583 connections.max.idle.ms = 600000
2584 connections.max.reauth.ms = 0
2585 controlled.shutdown.enable = true
2586 controller.listener.names = CONTROLLER
2587 controller.performance.always.log.threshold.ms = 2000
2588 controller.performance.sample.period.ms = 60000
2589 controller.quorum.append.linger.ms = 25
2590 controller.quorum.bootstrap.servers = []
2591 controller.quorum.election.backoff.max.ms = 1000
2592 controller.quorum.election.timeout.ms = 1000
2593 controller.quorum.fetch.timeout.ms = 2000
2594 controller.quorum.request.timeout.ms = 2000
2595 controller.quorum.retry.backoff.ms = 20
2596 controller.quorum.voters = [0@localhost:6002]
2597 controller.quota.window.num = 11
2598 controller.quota.window.size.seconds = 1
2599 controller.socket.timeout.ms = 30000
2600 create.topic.policy.class.name = null
2601 default.replication.factor = 1
2602 delegation.token.expiry.check.interval.ms = 3600000
2603 delegation.token.expiry.time.ms = 86400000
2604 delegation.token.max.lifetime.ms = 604800000
2605 delegation.token.secret.key = null
2606 delete.records.purgatory.purge.interval.requests = 1
2607 delete.topic.enable = true
2608 early.start.listeners = null
2609 fetch.max.bytes = 57671680
2610 fetch.purgatory.purge.interval.requests = 1000
2611 group.consumer.assignors = [uniform, range]
2612 group.consumer.heartbeat.interval.ms = 5000
2613 group.consumer.max.heartbeat.interval.ms = 15000
2614 group.consumer.max.session.timeout.ms = 60000
2615 group.consumer.max.size = 2147483647
2616 group.consumer.migration.policy = bidirectional
2617 group.consumer.min.heartbeat.interval.ms = 5000
2618 group.consumer.min.session.timeout.ms = 45000
2619 group.consumer.regex.refresh.interval.ms = 600000
2620 group.consumer.session.timeout.ms = 45000
2621 group.coordinator.append.linger.ms = 5
2622 group.coordinator.rebalance.protocols = [classic, consumer, streams]
2623 group.coordinator.threads = 4
2624 group.initial.rebalance.delay.ms = 3000
2625 group.max.session.timeout.ms = 1800000
2626 group.max.size = 2147483647
2627 group.min.session.timeout.ms = 6000
2628 group.share.assignors = [simple]
2629 group.share.delivery.count.limit = 5
2630 group.share.enable = false
2631 group.share.heartbeat.interval.ms = 5000
2632 group.share.max.heartbeat.interval.ms = 15000
2633 group.share.max.record.lock.duration.ms = 60000
2634 group.share.max.session.timeout.ms = 60000
2635 group.share.max.share.sessions = 2000
2636 group.share.max.size = 200
2637 group.share.min.heartbeat.interval.ms = 5000
2638 group.share.min.record.lock.duration.ms = 15000
2639 group.share.min.session.timeout.ms = 45000
2640 group.share.partition.max.record.locks = 2000
2641 group.share.persister.class.name = org.apache.kafka.server.share.persister.DefaultStatePersister
2642 group.share.record.lock.duration.ms = 30000
2643 group.share.session.timeout.ms = 45000
2644 group.streams.heartbeat.interval.ms = 5000
2645 group.streams.max.heartbeat.interval.ms = 15000
2646 group.streams.max.session.timeout.ms = 60000
2647 group.streams.max.size = 2147483647
2648 group.streams.max.standby.replicas = 2
2649 group.streams.min.heartbeat.interval.ms = 5000
2650 group.streams.min.session.timeout.ms = 45000
2651 group.streams.num.standby.replicas = 0
2652 group.streams.session.timeout.ms = 45000
2653 initial.broker.registration.timeout.ms = 60000
2654 inter.broker.listener.name = BROKER
2655 internal.metadata.delete.delay.millis = 60000
2656 internal.metadata.log.segment.bytes = null
2657 internal.metadata.max.batch.size.in.bytes = 8388608
2658 internal.metadata.max.fetch.size.in.bytes = 8388608
2659 kafka.metrics.polling.interval.secs = 10
2660 kafka.metrics.reporters = []
2661 leader.imbalance.check.interval.seconds = 300
2662 listener.security.protocol.map = BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT
2663 listeners = BROKER://localhost:6001,CONTROLLER://localhost:6002
2664 log.cleaner.backoff.ms = 15000
2665 log.cleaner.dedupe.buffer.size = 1048577
2666 log.cleaner.delete.retention.ms = 86400000
2667 log.cleaner.enable = true
2668 log.cleaner.io.buffer.load.factor = 0.9
2669 log.cleaner.io.buffer.size = 524288
2670 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
2671 log.cleaner.max.compaction.lag.ms = 9223372036854775807
2672 log.cleaner.min.cleanable.ratio = 0.5
2673 log.cleaner.min.compaction.lag.ms = 0
2674 log.cleaner.threads = 1
2675 log.cleanup.policy = [delete]
2676 log.dir = /tmp/kafka-logs
2677 log.dir.failure.timeout.ms = 30000
2678 log.dirs = /tmp/kafka-logs4345019044203235659
2679 log.flush.interval.messages = 1
2680 log.flush.interval.ms = null
2681 log.flush.offset.checkpoint.interval.ms = 60000
2682 log.flush.scheduler.interval.ms = 9223372036854775807
2683 log.flush.start.offset.checkpoint.interval.ms = 60000
2684 log.index.interval.bytes = 4096
2685 log.index.size.max.bytes = 10485760
2686 log.initial.task.delay.ms = 30000
2687 log.local.retention.bytes = -2
2688 log.local.retention.ms = -2
2689 log.message.timestamp.after.max.ms = 3600000
2690 log.message.timestamp.before.max.ms = 9223372036854775807
2691 log.message.timestamp.type = CreateTime
2692 log.preallocate = false
2693 log.retention.bytes = -1
2694 log.retention.check.interval.ms = 300000
2695 log.retention.hours = 168
2696 log.retention.minutes = null
2697 log.retention.ms = null
2698 log.roll.hours = 168
2699 log.roll.jitter.hours = 0
2700 log.roll.jitter.ms = null
2701 log.roll.ms = null
2702 log.segment.bytes = 1073741824
2703 log.segment.delete.delay.ms = 60000
2704 max.connection.creation.rate = 2147483647
2705 max.connections = 2147483647
2706 max.connections.per.ip = 2147483647
2707 max.connections.per.ip.overrides =
2708 max.incremental.fetch.session.cache.slots = 1000
2709 max.request.partition.size.limit = 2000
2710 message.max.bytes = 1048588
2711 metadata.log.dir = null
2712 metadata.log.max.record.bytes.between.snapshots = 20971520
2713 metadata.log.max.snapshot.interval.ms = 3600000
2714 metadata.log.segment.bytes = 1073741824
2715 metadata.log.segment.ms = 604800000
2716 metadata.max.idle.interval.ms = 500
2717 metadata.max.retention.bytes = 104857600
2718 metadata.max.retention.ms = 604800000
2719 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
2720 metrics.num.samples = 2
2721 metrics.recording.level = INFO
2722 metrics.sample.window.ms = 30000
2723 min.insync.replicas = 1
2724 node.id = 0
2725 num.io.threads = 8
2726 num.network.threads = 3
2727 num.partitions = 1
2728 num.recovery.threads.per.data.dir = 2
2729 num.replica.alter.log.dirs.threads = null
2730 num.replica.fetchers = 1
2731 offset.metadata.max.bytes = 4096
2732 offsets.commit.timeout.ms = 5000
2733 offsets.load.buffer.size = 5242880
2734 offsets.retention.check.interval.ms = 600000
2735 offsets.retention.minutes = 10080
2736 offsets.topic.compression.codec = 0
2737 offsets.topic.num.partitions = 1
2738 offsets.topic.replication.factor = 1
2739 offsets.topic.segment.bytes = 104857600
2740 principal.builder.class = class org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
2741 process.roles = [broker, controller]
2742 producer.id.expiration.check.interval.ms = 600000
2743 producer.id.expiration.ms = 86400000
2744 producer.purgatory.purge.interval.requests = 1000
2745 queued.max.request.bytes = -1
2746 queued.max.requests = 500
2747 quota.window.num = 11
2748 quota.window.size.seconds = 1
2749 remote.fetch.max.wait.ms = 500
2750 remote.list.offsets.request.timeout.ms = 30000
2751 remote.log.index.file.cache.total.size.bytes = 1073741824
2752 remote.log.manager.copier.thread.pool.size = 10
2753 remote.log.manager.copy.max.bytes.per.second = 9223372036854775807
2754 remote.log.manager.copy.quota.window.num = 11
2755 remote.log.manager.copy.quota.window.size.seconds = 1
2756 remote.log.manager.expiration.thread.pool.size = 10
2757 remote.log.manager.fetch.max.bytes.per.second = 9223372036854775807
2758 remote.log.manager.fetch.quota.window.num = 11
2759 remote.log.manager.fetch.quota.window.size.seconds = 1
2760 remote.log.manager.task.interval.ms = 30000
2761 remote.log.manager.task.retry.backoff.max.ms = 30000
2762 remote.log.manager.task.retry.backoff.ms = 500
2763 remote.log.manager.task.retry.jitter = 0.2
2764 remote.log.manager.thread.pool.size = 2
2765 remote.log.metadata.custom.metadata.max.bytes = 128
2766 remote.log.metadata.manager.class.name = org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager
2767 remote.log.metadata.manager.class.path = null
2768 remote.log.metadata.manager.impl.prefix = rlmm.config.
2769 remote.log.metadata.manager.listener.name = null
2770 remote.log.reader.max.pending.tasks = 100
2771 remote.log.reader.threads = 10
2772 remote.log.storage.manager.class.name = null
2773 remote.log.storage.manager.class.path = null
2774 remote.log.storage.manager.impl.prefix = rsm.config.
2775 remote.log.storage.system.enable = false
2776 replica.fetch.backoff.ms = 1000
2777 replica.fetch.max.bytes = 1048576
2778 replica.fetch.min.bytes = 1
2779 replica.fetch.response.max.bytes = 10485760
2780 replica.fetch.wait.max.ms = 500
2781 replica.high.watermark.checkpoint.interval.ms = 5000
2782 replica.lag.time.max.ms = 30000
2783 replica.selector.class = null
2784 replica.socket.receive.buffer.bytes = 65536
2785 replica.socket.timeout.ms = 30000
2786 replication.quota.window.num = 11
2787 replication.quota.window.size.seconds = 1
2788 request.timeout.ms = 30000
2789 sasl.client.callback.handler.class = null
2790 sasl.enabled.mechanisms = [GSSAPI]
2791 sasl.jaas.config = null
2792 sasl.kerberos.kinit.cmd = /usr/bin/kinit
2793 sasl.kerberos.min.time.before.relogin = 60000
2794 sasl.kerberos.principal.to.local.rules = [DEFAULT]
2795 sasl.kerberos.service.name = null
2796 sasl.kerberos.ticket.renew.jitter = 0.05
2797 sasl.kerberos.ticket.renew.window.factor = 0.8
2798 sasl.login.callback.handler.class = null
2799 sasl.login.class = null
2800 sasl.login.connect.timeout.ms = null
2801 sasl.login.read.timeout.ms = null
2802 sasl.login.refresh.buffer.seconds = 300
2803 sasl.login.refresh.min.period.seconds = 60
2804 sasl.login.refresh.window.factor = 0.8
2805 sasl.login.refresh.window.jitter = 0.05
2806 sasl.login.retry.backoff.max.ms = 10000
2807 sasl.login.retry.backoff.ms = 100
2808 sasl.mechanism.controller.protocol = GSSAPI
2809 sasl.mechanism.inter.broker.protocol = GSSAPI
2810 sasl.oauthbearer.assertion.algorithm = RS256
2811 sasl.oauthbearer.assertion.claim.aud = null
2812 sasl.oauthbearer.assertion.claim.exp.seconds = 300
2813 sasl.oauthbearer.assertion.claim.iss = null
2814 sasl.oauthbearer.assertion.claim.jti.include = false
2815 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
2816 sasl.oauthbearer.assertion.claim.sub = null
2817 sasl.oauthbearer.assertion.file = null
2818 sasl.oauthbearer.assertion.private.key.file = null
2819 sasl.oauthbearer.assertion.private.key.passphrase = null
2820 sasl.oauthbearer.assertion.template.file = null
2821 sasl.oauthbearer.client.credentials.client.id = null
2822 sasl.oauthbearer.client.credentials.client.secret = null
2823 sasl.oauthbearer.clock.skew.seconds = 30
2824 sasl.oauthbearer.expected.audience = null
2825 sasl.oauthbearer.expected.issuer = null
2826 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
2827 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
2828 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
2829 sasl.oauthbearer.jwks.endpoint.url = null
2830 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
2831 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
2832 sasl.oauthbearer.scope = null
2833 sasl.oauthbearer.scope.claim.name = scope
2834 sasl.oauthbearer.sub.claim.name = sub
2835 sasl.oauthbearer.token.endpoint.url = null
2836 sasl.server.callback.handler.class = null
2837 sasl.server.max.receive.size = 524288
2838 security.inter.broker.protocol = PLAINTEXT
2839 security.providers = null
2840 server.max.startup.time.ms = 9223372036854775807
2841 share.coordinator.append.linger.ms = 5
2842 share.coordinator.cold.partition.snapshot.interval.ms = 300000
2843 share.coordinator.load.buffer.size = 5242880
2844 share.coordinator.snapshot.update.records.per.snapshot = 500
2845 share.coordinator.state.topic.compression.codec = 0
2846 share.coordinator.state.topic.min.isr = 2
2847 share.coordinator.state.topic.num.partitions = 50
2848 share.coordinator.state.topic.prune.interval.ms = 300000
2849 share.coordinator.state.topic.replication.factor = 3
2850 share.coordinator.state.topic.segment.bytes = 104857600
2851 share.coordinator.threads = 1
2852 share.coordinator.write.timeout.ms = 5000
2853 share.fetch.purgatory.purge.interval.requests = 1000
2854 socket.connection.setup.timeout.max.ms = 30000
2855 socket.connection.setup.timeout.ms = 10000
2856 socket.listen.backlog.size = 50
2857 socket.receive.buffer.bytes = 102400
2858 socket.request.max.bytes = 104857600
2859 socket.send.buffer.bytes = 102400
2860 ssl.allow.dn.changes = false
2861 ssl.allow.san.changes = false
2862 ssl.cipher.suites = []
2863 ssl.client.auth = none
2864 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
2865 ssl.endpoint.identification.algorithm = https
2866 ssl.engine.factory.class = null
2867 ssl.key.password = null
2868 ssl.keymanager.algorithm = SunX509
2869 ssl.keystore.certificate.chain = null
2870 ssl.keystore.key = null
2871 ssl.keystore.location = null
2872 ssl.keystore.password = null
2873 ssl.keystore.type = JKS
2874 ssl.principal.mapping.rules = DEFAULT
2875 ssl.protocol = TLSv1.3
2876 ssl.provider = null
2877 ssl.secure.random.implementation = null
2878 ssl.trustmanager.algorithm = PKIX
2879 ssl.truststore.certificates = null
2880 ssl.truststore.location = null
2881 ssl.truststore.password = null
2882 ssl.truststore.type = JKS
2883 telemetry.max.bytes = 1048576
2884 transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
2885 transaction.max.timeout.ms = 900000
2886 transaction.partition.verification.enable = true
2887 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
2888 transaction.state.log.load.buffer.size = 5242880
2889 transaction.state.log.min.isr = 1
2890 transaction.state.log.num.partitions = 50
2891 transaction.state.log.replication.factor = 1
2892 transaction.state.log.segment.bytes = 104857600
2893 transaction.two.phase.commit.enable = false
2894 transactional.id.expiration.ms = 604800000
2895 unclean.leader.election.enable = false
2896 unclean.leader.election.interval.ms = 300000
2897 unstable.api.versions.enable = false
2898 unstable.feature.versions.enable = false
2899
290012:05:09.368 [pool-67-thread-1] INFO k.u.Log4jControllerRegistration$ - Registered `kafka:type=kafka.Log4jController` MBean
290112:05:09.411 [pool-67-thread-1] INFO i.g.e.EmbeddedKafka$ - [KafkaRaftServer nodeId=0] Rewriting /tmp/kafka-logs4345019044203235659/meta.properties
290212:05:09.460 [pool-67-thread-1] INFO k.s.ControllerServer - [ControllerServer id=0] Starting controller
290312:05:09.770 [pool-67-thread-1] INFO k.n.ConnectionQuotas - Updated connection-accept-rate max connection creation rate to 2147483647
290412:05:09.799 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(CONTROLLER)
290512:05:09.804 [pool-67-thread-1] INFO o.a.k.s.n.EndpointReadyFutures - authorizerStart completed for endpoint CONTROLLER. Endpoint is now READY.
290612:05:09.805 [pool-67-thread-1] INFO k.s.SharedServer - [SharedServer id=0] Starting SharedServer
290712:05:09.851 [pool-67-thread-1] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
290812:05:09.852 [pool-67-thread-1] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs4345019044203235659] Reloading from producer snapshot and rebuilding producer state from offset 0
290912:05:09.852 [pool-67-thread-1] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__cluster_metadata-0, dir=/tmp/kafka-logs4345019044203235659] Producer state recovery took 0ms for snapshot load and 0ms for segment recovery from offset 0
291012:05:09.872 [pool-67-thread-1] INFO k.r.KafkaMetadataLog$ - Initialized snapshots with IDs SortedSet() from /tmp/kafka-logs4345019044203235659/__cluster_metadata-0
291112:05:09.882 [raft-expiration-reaper] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Starting
291212:05:09.894 [pool-67-thread-1] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Reading KRaft snapshot and log as part of the initialization
291312:05:09.896 [pool-67-thread-1] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Starting voters are VoterSet(voters={0=VoterNode(voterKey=ReplicaKey(id=0, directoryId=<undefined>), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/127.0.0.1:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:0])})
291412:05:09.899 [pool-67-thread-1] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Starting request manager with static voters: [localhost:6002 (id: 0 rack: null isFenced: false)]
291512:05:09.903 [pool-67-thread-1] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1863, highWatermark=Optional.empty) from null
291612:05:09.978 [pool-67-thread-1] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1863, highWatermark=Optional.empty) from null
291712:05:09.981 [pool-67-thread-1] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1073, highWatermark=Optional.empty) from UnattachedState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, voters=[0], electionTimeoutMs=1863, highWatermark=Optional.empty)
291812:05:09.983 [pool-67-thread-1] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to CandidateState(localId=0, localDirectoryId=1IEk1f33dz_GsvBzHQUJSQ, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1147) from ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1073, highWatermark=Optional.empty)
291912:05:09.988 [pool-67-thread-1] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to CandidateState(localId=0, localDirectoryId=1IEk1f33dz_GsvBzHQUJSQ, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1147) from ProspectiveState(epoch=0, leaderId=OptionalInt.empty, votedKey=Optional.empty, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), electionTimeoutMs=1073, highWatermark=Optional.empty)
292012:05:09.993 [pool-67-thread-1] INFO o.a.k.r.QuorumState - [RaftManager id=0] Attempting durable transition to Leader(localVoterNode=VoterNode(voterKey=ReplicaKey(id=0, directoryId=1IEk1f33dz_GsvBzHQUJSQ), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/<unresolved>:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:1]), epoch=1, epochStartOffset=0, highWatermark=Optional.empty, voterStates={0=ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional.empty, lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)}) from CandidateState(localId=0, localDirectoryId=1IEk1f33dz_GsvBzHQUJSQ, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1147)
292112:05:09.995 [pool-67-thread-1] INFO o.a.k.r.QuorumState - [RaftManager id=0] Completed transition to Leader(localVoterNode=VoterNode(voterKey=ReplicaKey(id=0, directoryId=1IEk1f33dz_GsvBzHQUJSQ), listeners=Endpoints(endpoints={ListenerName(CONTROLLER)=localhost/<unresolved>:6002}), supportedKRaftVersion=SupportedVersionRange[min_version:0, max_version:1]), epoch=1, epochStartOffset=0, highWatermark=Optional.empty, voterStates={0=ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional.empty, lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)}) from CandidateState(localId=0, localDirectoryId=1IEk1f33dz_GsvBzHQUJSQ, epoch=1, epochElection=EpochElection(voterStates={0=VoterState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), state=GRANTED)}), highWatermark=Optional.empty, electionTimeoutMs=1147)
292212:05:10.014 [kafka-0-raft-outbound-request-thread] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Starting
292312:05:10.014 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Starting
292412:05:10.029 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] initializeNewPublishers: The loader is still catching up because we have loaded up to offset -1, but the high water mark is 1
292512:05:10.031 [pool-67-thread-1] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for controller quorum voters future
292612:05:10.031 [pool-67-thread-1] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for controller quorum voters future
292712:05:10.033 [kafka-0-raft-io-thread] INFO o.a.k.r.LeaderState - [RaftManager id=0] High watermark set to LogOffsetMetadata(offset=1, metadata=Optional[(segmentBaseOffset=0,relativePositionInSegment=91)]) for the first time for epoch 1 based on indexOfHw 0 and voters [ReplicaState(replicaKey=ReplicaKey(id=0, directoryId=<undefined>), endOffset=Optional[LogOffsetMetadata(offset=1, metadata=Optional[(segmentBaseOffset=0,relativePositionInSegment=91)])], lastFetchTimestamp=-1, lastCaughtUpTimestamp=-1, hasAcknowledgedLeader=true)]
292812:05:10.041 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Registered the listener org.apache.kafka.image.loader.MetadataLoader@1269315993
292912:05:10.043 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Setting the next offset of org.apache.kafka.image.loader.MetadataLoader@1269315993 to 0 since there are no snapshots
293012:05:10.045 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] maybePublishMetadata(LOG_DELTA): The loader is still catching up because we have not loaded a controller record as of offset 0 and high water mark is 1
293112:05:10.066 [pool-67-thread-1] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task writeNoOpRecord to run every 500 ms
293212:05:10.066 [pool-67-thread-1] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task maybeFenceStaleBroker to run every 1125 ms
293312:05:10.066 [pool-67-thread-1] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task electPreferred to run every 300000 ms
293412:05:10.067 [pool-67-thread-1] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task electUnclean to run every 300000 ms
293512:05:10.067 [pool-67-thread-1] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task expireDelegationTokens to run every 3600000 ms
293612:05:10.067 [pool-67-thread-1] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Registering periodic task generatePeriodicPerformanceMessage to run every 60000 ms
293712:05:10.068 [pool-67-thread-1] INFO o.a.k.c.QuorumController - [QuorumController id=0] Creating new QuorumController with clusterId cERjULLDRBGv7lPJWPu8sA
293812:05:10.069 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Registered the listener org.apache.kafka.controller.QuorumController$QuorumMetaLogListener@1524306807
293912:05:10.069 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Setting the next offset of org.apache.kafka.controller.QuorumController$QuorumMetaLogListener@1524306807 to 0 since there are no snapshots
294012:05:10.071 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] Becoming the active controller at epoch 1, next write offset 1.
294112:05:10.077 [controller-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Starting
294212:05:10.077 [controller-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Starting
294312:05:10.078 [controller-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Starting
294412:05:10.078 [quorum-controller-0-event-handler] WARN o.a.k.c.QuorumController - [QuorumController id=0] Performing controller activation. The metadata log appears to be empty. Appending 1 bootstrap record(s) in metadata transaction at metadata.version 4.1-IV1 from bootstrap source 'the default bootstrap'.
294512:05:10.080 [controller-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Starting
294612:05:10.081 [quorum-controller-0-event-handler] INFO o.a.k.c.OffsetControlManager - [QuorumController id=0] Replayed BeginTransactionRecord(name='Bootstrap records') at offset 1.
294712:05:10.081 [quorum-controller-0-event-handler] INFO o.a.k.c.FeatureControlManager - [QuorumController id=0] Replayed a FeatureLevelRecord setting metadata.version to 4.1-IV1
294812:05:10.081 [quorum-controller-0-event-handler] INFO o.a.k.c.OffsetControlManager - [QuorumController id=0] Replayed EndTransactionRecord() at offset 3.
294912:05:10.083 [quorum-controller-0-event-handler] INFO o.a.k.c.PeriodicTaskControlManager - [QuorumController id=0] Activated periodic tasks: electPreferred, electUnclean, expireDelegationTokens, generatePeriodicPerformanceMessage, maybeFenceStaleBroker, writeNoOpRecord
295012:05:10.092 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Starting
295112:05:10.104 [pool-67-thread-1] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for the controller metadata publishers to be installed
295212:05:10.104 [pool-67-thread-1] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for the controller metadata publishers to be installed
295312:05:10.104 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] initializeNewPublishers: The loader is still catching up because we have not loaded a controller record as of offset 0 and high water mark is 1
295412:05:10.105 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Enabling request processing.
295512:05:10.111 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] maybePublishMetadata(LOG_DELTA): The loader finished catching up to the current high water mark of 4
295612:05:10.112 [pool-67-thread-1] INFO k.n.DataPlaneAcceptor - Awaiting socket connections on localhost:6002.
295712:05:10.114 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing SnapshotGenerator with a snapshot at offset 3
295812:05:10.115 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing KRaftMetadataCachePublisher with a snapshot at offset 3
295912:05:10.115 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing FeaturesPublisher with a snapshot at offset 3
296012:05:10.119 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Starting
296112:05:10.120 [pool-67-thread-1] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for all of the authorizer futures to be completed
296212:05:10.120 [pool-67-thread-1] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for all of the authorizer futures to be completed
296312:05:10.120 [pool-67-thread-1] INFO k.s.ControllerServer - [ControllerServer id=0] Waiting for all of the SocketServer Acceptors to be started
296412:05:10.120 [pool-67-thread-1] INFO k.s.ControllerServer - [ControllerServer id=0] Finished waiting for all of the SocketServer Acceptors to be started
296512:05:10.120 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from SHUTDOWN to STARTING
296612:05:10.121 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=Fx8es18YQ563VpClOcTo3w] initialized channel manager.
296712:05:10.121 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=Fx8es18YQ563VpClOcTo3w] maybeSendControllerRegistration: cannot register yet because the metadata.version is not known yet.
296812:05:10.122 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Starting broker
296912:05:10.121 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
297012:05:10.127 [broker-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Starting
297112:05:10.128 [broker-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Starting
297212:05:10.128 [kafka-0-metadata-loader-event-handler] INFO o.a.k.m.p.FeaturesPublisher - [ControllerServer id=0] Loaded new metadata FinalizedFeatures[metadataVersion=4.1-IV1, finalizedFeatures={metadata.version=27}, finalizedFeaturesEpoch=3].
297312:05:10.128 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerRegistrationsPublisher with a snapshot at offset 3
297412:05:10.128 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerRegistrationManager with a snapshot at offset 3
297512:05:10.128 [broker-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Starting
297612:05:10.129 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicConfigPublisher controller id=0 with a snapshot at offset 3
297712:05:10.129 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicClientQuotaPublisher controller id=0 with a snapshot at offset 3
297812:05:10.130 [broker-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Starting
297912:05:10.130 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DynamicTopicClusterQuotaPublisher controller id=0 with a snapshot at offset 3
298012:05:10.131 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ScramPublisher controller id=0 with a snapshot at offset 3
298112:05:10.132 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing DelegationTokenPublisher controller id=0 with a snapshot at offset 3
298212:05:10.134 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing ControllerMetadataMetricsPublisher with a snapshot at offset 3
298312:05:10.135 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing AclPublisher controller id=0 with a snapshot at offset 3
298412:05:10.138 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=Fx8es18YQ563VpClOcTo3w] sendControllerRegistration: attempting to send ControllerRegistrationRequestData(controllerId=0, incarnationId=Fx8es18YQ563VpClOcTo3w, zkMigrationReady=false, listeners=[Listener(name='CONTROLLER', host='localhost', port=6002, securityProtocol=0)], features=[Feature(name='group.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='transaction.version', minSupportedVersion=0, maxSupportedVersion=2), Feature(name='eligible.leader.replicas.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='kraft.version', minSupportedVersion=0, maxSupportedVersion=1), Feature(name='metadata.version', minSupportedVersion=7, maxSupportedVersion=27), Feature(name='share.version', minSupportedVersion=0, maxSupportedVersion=1)])
298512:05:10.151 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for controller quorum voters future
298612:05:10.152 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for controller quorum voters future
298712:05:10.153 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Starting
298812:05:10.154 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
298912:05:10.161 [client-metrics-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Starting
299012:05:10.192 [pool-67-thread-1] INFO k.n.ConnectionQuotas - Updated connection-accept-rate max connection creation rate to 2147483647
299112:05:10.195 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(BROKER)
299212:05:10.201 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Starting
299312:05:10.201 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
299412:05:10.208 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Starting
299512:05:10.209 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
299612:05:10.217 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed RegisterControllerRecord containing ControllerRegistration(id=0, incarnationId=Fx8es18YQ563VpClOcTo3w, zkMigrationReady=false, listeners=[Endpoint(listenerName='CONTROLLER', securityProtocol=PLAINTEXT, host='localhost', port=6002)], supportedFeatures={eligible.leader.replicas.version: 0-1, group.version: 0-1, kraft.version: 0-1, metadata.version: 7-27, share.version: 0-1, transaction.version: 0-2}).
299712:05:10.224 [ExpirationReaper-0-Produce] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Starting
299812:05:10.225 [ExpirationReaper-0-Fetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Starting
299912:05:10.226 [ExpirationReaper-0-DeleteRecords] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Starting
300012:05:10.227 [ExpirationReaper-0-RemoteFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Starting
300112:05:10.227 [ExpirationReaper-0-RemoteListOffsets] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Starting
300212:05:10.228 [ExpirationReaper-0-ShareFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Starting
300312:05:10.240 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=Fx8es18YQ563VpClOcTo3w] Our registration has been persisted to the metadata log.
300412:05:10.243 [controller-0-to-controller-registration-channel-manager] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=Fx8es18YQ563VpClOcTo3w] RegistrationResponseHandler: controller acknowledged ControllerRegistrationRequest.
300512:05:10.248 [share-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Starting
300612:05:10.267 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Starting
300712:05:10.273 [persister-state-manager-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Starting
300812:05:10.274 [PersisterStateManager] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Starting
300912:05:10.275 [group-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Starting
301012:05:10.285 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Starting
301112:05:10.285 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Starting
301212:05:10.285 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Starting
301312:05:10.285 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Starting
301412:05:10.307 [pool-67-thread-1] INFO k.l.LogManager - Unable to read the broker epoch in /tmp/kafka-logs4345019044203235659.
301512:05:10.308 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Starting
301612:05:10.308 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Recorded new KRaft controller, from now on will use node localhost:6002 (id: 0 rack: null isFenced: false)
301712:05:10.311 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Incarnation 0EERV87jS8--bfa80LDwcw of broker 0 in cluster cERjULLDRBGv7lPJWPu8sA is now STARTING.
301812:05:10.320 [share-group-lock-timeout-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Starting
301912:05:10.321 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] No previous registration found for broker 0. New incarnation ID is 0EERV87jS8--bfa80LDwcw. Generated 0 record(s) to clean up previous incarnations. New broker epoch is 5.
302012:05:10.328 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed initial RegisterBrokerRecord for broker 0: RegisterBrokerRecord(brokerId=0, isMigratingZkBroker=false, incarnationId=0EERV87jS8--bfa80LDwcw, brokerEpoch=5, endPoints=[BrokerEndpoint(name='BROKER', host='localhost', port=6001, securityProtocol=0)], features=[BrokerFeature(name='group.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='transaction.version', minSupportedVersion=0, maxSupportedVersion=2), BrokerFeature(name='eligible.leader.replicas.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='kraft.version', minSupportedVersion=0, maxSupportedVersion=1), BrokerFeature(name='metadata.version', minSupportedVersion=7, maxSupportedVersion=27), BrokerFeature(name='share.version', minSupportedVersion=0, maxSupportedVersion=1)], rack=null, fenced=true, inControlledShutdown=false, logDirs=[1IEk1f33dz_GsvBzHQUJSQ])
302112:05:10.334 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Starting
302212:05:10.350 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the broker metadata publishers to be installed
302312:05:10.351 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the broker metadata publishers to be installed
302412:05:10.351 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the controller to acknowledge that we are caught up
302512:05:10.351 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing MetadataVersionPublisher(id=0) with a snapshot at offset 5
302612:05:10.351 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing BrokerMetadataPublisher with a snapshot at offset 5
302712:05:10.353 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Successfully registered broker 0 with broker epoch 5
302812:05:10.354 [kafka-0-metadata-loader-event-handler] INFO k.s.m.BrokerMetadataPublisher - [BrokerMetadataPublisher id=0] Publishing initial metadata at offset OffsetAndEpoch[offset=5, epoch=1] with metadata.version Optional[4.1-IV1].
302912:05:10.355 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Loading logs from log dirs ArrayBuffer(/tmp/kafka-logs4345019044203235659)
303012:05:10.358 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - No logs found to be loaded in /tmp/kafka-logs4345019044203235659
303112:05:10.361 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker has caught up. Transitioning from STARTING to RECOVERY.
303212:05:10.362 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the controller to acknowledge that we are caught up
303312:05:10.362 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the initial broker metadata update to be published
303412:05:10.367 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Loaded 0 logs in 11ms
303512:05:10.368 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Starting log cleanup with a period of 300000 ms.
303612:05:10.369 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Starting log flusher with a default period of 9223372036854775807 ms.
303712:05:10.376 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.LogCleaner - Starting the log cleaner
303812:05:10.376 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker is in RECOVERY.
303912:05:10.383 [kafka-log-cleaner-thread-0] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Starting
304012:05:10.387 [LogDirFailureHandler] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Starting
304112:05:10.388 [AddPartitionsToTxnSenderThread-0] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Starting
304212:05:10.391 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Starting up.
304312:05:10.391 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Startup complete.
304412:05:10.392 [kafka-0-metadata-loader-event-handler] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Starting up.
304512:05:10.394 [TxnMarkerSenderThread-0] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Starting
304612:05:10.394 [kafka-0-metadata-loader-event-handler] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Startup complete.
304712:05:10.394 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Starting up.
304812:05:10.395 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Startup complete.
304912:05:10.401 [kafka-0-metadata-loader-event-handler] INFO o.a.k.i.l.MetadataLoader - [MetadataLoader id=0] InitializeNewPublishers: initializing BrokerRegistrationTracker(id=0) with a snapshot at offset 5
305012:05:10.401 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the initial broker metadata update to be published
305112:05:10.402 [pool-67-thread-1] INFO o.a.k.c.c.AbstractConfig - KafkaConfig values:
3052 add.partitions.to.txn.retry.backoff.max.ms = 100
3053 add.partitions.to.txn.retry.backoff.ms = 20
3054 advertised.listeners = BROKER://localhost:6001
3055 alter.config.policy.class.name = null
3056 alter.log.dirs.replication.quota.window.num = 11
3057 alter.log.dirs.replication.quota.window.size.seconds = 1
3058 authorizer.class.name =
3059 auto.create.topics.enable = true
3060 auto.leader.rebalance.enable = true
3061 background.threads = 10
3062 broker.heartbeat.interval.ms = 2000
3063 broker.id = 0
3064 broker.rack = null
3065 broker.session.timeout.ms = 9000
3066 client.quota.callback.class = null
3067 compression.gzip.level = -1
3068 compression.lz4.level = 9
3069 compression.type = producer
3070 compression.zstd.level = 3
3071 connection.failed.authentication.delay.ms = 100
3072 connections.max.idle.ms = 600000
3073 connections.max.reauth.ms = 0
3074 controlled.shutdown.enable = true
3075 controller.listener.names = CONTROLLER
3076 controller.performance.always.log.threshold.ms = 2000
3077 controller.performance.sample.period.ms = 60000
3078 controller.quorum.append.linger.ms = 25
3079 controller.quorum.bootstrap.servers = []
3080 controller.quorum.election.backoff.max.ms = 1000
3081 controller.quorum.election.timeout.ms = 1000
3082 controller.quorum.fetch.timeout.ms = 2000
3083 controller.quorum.request.timeout.ms = 2000
3084 controller.quorum.retry.backoff.ms = 20
3085 controller.quorum.voters = [0@localhost:6002]
3086 controller.quota.window.num = 11
3087 controller.quota.window.size.seconds = 1
3088 controller.socket.timeout.ms = 30000
3089 create.topic.policy.class.name = null
3090 default.replication.factor = 1
3091 delegation.token.expiry.check.interval.ms = 3600000
3092 delegation.token.expiry.time.ms = 86400000
3093 delegation.token.max.lifetime.ms = 604800000
3094 delegation.token.secret.key = null
3095 delete.records.purgatory.purge.interval.requests = 1
3096 delete.topic.enable = true
3097 early.start.listeners = null
3098 fetch.max.bytes = 57671680
3099 fetch.purgatory.purge.interval.requests = 1000
3100 group.consumer.assignors = [uniform, range]
3101 group.consumer.heartbeat.interval.ms = 5000
3102 group.consumer.max.heartbeat.interval.ms = 15000
3103 group.consumer.max.session.timeout.ms = 60000
3104 group.consumer.max.size = 2147483647
3105 group.consumer.migration.policy = bidirectional
3106 group.consumer.min.heartbeat.interval.ms = 5000
3107 group.consumer.min.session.timeout.ms = 45000
3108 group.consumer.regex.refresh.interval.ms = 600000
3109 group.consumer.session.timeout.ms = 45000
3110 group.coordinator.append.linger.ms = 5
3111 group.coordinator.rebalance.protocols = [classic, consumer, streams]
3112 group.coordinator.threads = 4
3113 group.initial.rebalance.delay.ms = 3000
3114 group.max.session.timeout.ms = 1800000
3115 group.max.size = 2147483647
3116 group.min.session.timeout.ms = 6000
3117 group.share.assignors = [simple]
3118 group.share.delivery.count.limit = 5
3119 group.share.enable = false
3120 group.share.heartbeat.interval.ms = 5000
3121 group.share.max.heartbeat.interval.ms = 15000
3122 group.share.max.record.lock.duration.ms = 60000
3123 group.share.max.session.timeout.ms = 60000
3124 group.share.max.share.sessions = 2000
3125 group.share.max.size = 200
3126 group.share.min.heartbeat.interval.ms = 5000
3127 group.share.min.record.lock.duration.ms = 15000
3128 group.share.min.session.timeout.ms = 45000
3129 group.share.partition.max.record.locks = 2000
3130 group.share.persister.class.name = org.apache.kafka.server.share.persister.DefaultStatePersister
3131 group.share.record.lock.duration.ms = 30000
3132 group.share.session.timeout.ms = 45000
3133 group.streams.heartbeat.interval.ms = 5000
3134 group.streams.max.heartbeat.interval.ms = 15000
3135 group.streams.max.session.timeout.ms = 60000
3136 group.streams.max.size = 2147483647
3137 group.streams.max.standby.replicas = 2
3138 group.streams.min.heartbeat.interval.ms = 5000
3139 group.streams.min.session.timeout.ms = 45000
3140 group.streams.num.standby.replicas = 0
3141 group.streams.session.timeout.ms = 45000
3142 initial.broker.registration.timeout.ms = 60000
3143 inter.broker.listener.name = BROKER
3144 internal.metadata.delete.delay.millis = 60000
3145 internal.metadata.log.segment.bytes = null
3146 internal.metadata.max.batch.size.in.bytes = 8388608
3147 internal.metadata.max.fetch.size.in.bytes = 8388608
3148 kafka.metrics.polling.interval.secs = 10
3149 kafka.metrics.reporters = []
3150 leader.imbalance.check.interval.seconds = 300
3151 listener.security.protocol.map = BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT
3152 listeners = BROKER://localhost:6001,CONTROLLER://localhost:6002
3153 log.cleaner.backoff.ms = 15000
3154 log.cleaner.dedupe.buffer.size = 1048577
3155 log.cleaner.delete.retention.ms = 86400000
3156 log.cleaner.enable = true
3157 log.cleaner.io.buffer.load.factor = 0.9
3158 log.cleaner.io.buffer.size = 524288
3159 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
3160 log.cleaner.max.compaction.lag.ms = 9223372036854775807
3161 log.cleaner.min.cleanable.ratio = 0.5
3162 log.cleaner.min.compaction.lag.ms = 0
3163 log.cleaner.threads = 1
3164 log.cleanup.policy = [delete]
3165 log.dir = /tmp/kafka-logs
3166 log.dir.failure.timeout.ms = 30000
3167 log.dirs = /tmp/kafka-logs4345019044203235659
3168 log.flush.interval.messages = 1
3169 log.flush.interval.ms = null
3170 log.flush.offset.checkpoint.interval.ms = 60000
3171 log.flush.scheduler.interval.ms = 9223372036854775807
3172 log.flush.start.offset.checkpoint.interval.ms = 60000
3173 log.index.interval.bytes = 4096
3174 log.index.size.max.bytes = 10485760
3175 log.initial.task.delay.ms = 30000
3176 log.local.retention.bytes = -2
3177 log.local.retention.ms = -2
3178 log.message.timestamp.after.max.ms = 3600000
3179 log.message.timestamp.before.max.ms = 9223372036854775807
3180 log.message.timestamp.type = CreateTime
3181 log.preallocate = false
3182 log.retention.bytes = -1
3183 log.retention.check.interval.ms = 300000
3184 log.retention.hours = 168
3185 log.retention.minutes = null
3186 log.retention.ms = null
3187 log.roll.hours = 168
3188 log.roll.jitter.hours = 0
3189 log.roll.jitter.ms = null
3190 log.roll.ms = null
3191 log.segment.bytes = 1073741824
3192 log.segment.delete.delay.ms = 60000
3193 max.connection.creation.rate = 2147483647
3194 max.connections = 2147483647
3195 max.connections.per.ip = 2147483647
3196 max.connections.per.ip.overrides =
3197 max.incremental.fetch.session.cache.slots = 1000
3198 max.request.partition.size.limit = 2000
3199 message.max.bytes = 1048588
3200 metadata.log.dir = null
3201 metadata.log.max.record.bytes.between.snapshots = 20971520
3202 metadata.log.max.snapshot.interval.ms = 3600000
3203 metadata.log.segment.bytes = 1073741824
3204 metadata.log.segment.ms = 604800000
3205 metadata.max.idle.interval.ms = 500
3206 metadata.max.retention.bytes = 104857600
3207 metadata.max.retention.ms = 604800000
3208 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3209 metrics.num.samples = 2
3210 metrics.recording.level = INFO
3211 metrics.sample.window.ms = 30000
3212 min.insync.replicas = 1
3213 node.id = 0
3214 num.io.threads = 8
3215 num.network.threads = 3
3216 num.partitions = 1
3217 num.recovery.threads.per.data.dir = 2
3218 num.replica.alter.log.dirs.threads = null
3219 num.replica.fetchers = 1
3220 offset.metadata.max.bytes = 4096
3221 offsets.commit.timeout.ms = 5000
3222 offsets.load.buffer.size = 5242880
3223 offsets.retention.check.interval.ms = 600000
3224 offsets.retention.minutes = 10080
3225 offsets.topic.compression.codec = 0
3226 offsets.topic.num.partitions = 1
3227 offsets.topic.replication.factor = 1
3228 offsets.topic.segment.bytes = 104857600
3229 principal.builder.class = class org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
3230 process.roles = [broker, controller]
3231 producer.id.expiration.check.interval.ms = 600000
3232 producer.id.expiration.ms = 86400000
3233 producer.purgatory.purge.interval.requests = 1000
3234 queued.max.request.bytes = -1
3235 queued.max.requests = 500
3236 quota.window.num = 11
3237 quota.window.size.seconds = 1
3238 remote.fetch.max.wait.ms = 500
3239 remote.list.offsets.request.timeout.ms = 30000
3240 remote.log.index.file.cache.total.size.bytes = 1073741824
3241 remote.log.manager.copier.thread.pool.size = 10
3242 remote.log.manager.copy.max.bytes.per.second = 9223372036854775807
3243 remote.log.manager.copy.quota.window.num = 11
3244 remote.log.manager.copy.quota.window.size.seconds = 1
3245 remote.log.manager.expiration.thread.pool.size = 10
3246 remote.log.manager.fetch.max.bytes.per.second = 9223372036854775807
3247 remote.log.manager.fetch.quota.window.num = 11
3248 remote.log.manager.fetch.quota.window.size.seconds = 1
3249 remote.log.manager.task.interval.ms = 30000
3250 remote.log.manager.task.retry.backoff.max.ms = 30000
3251 remote.log.manager.task.retry.backoff.ms = 500
3252 remote.log.manager.task.retry.jitter = 0.2
3253 remote.log.manager.thread.pool.size = 2
3254 remote.log.metadata.custom.metadata.max.bytes = 128
3255 remote.log.metadata.manager.class.name = org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager
3256 remote.log.metadata.manager.class.path = null
3257 remote.log.metadata.manager.impl.prefix = rlmm.config.
3258 remote.log.metadata.manager.listener.name = null
3259 remote.log.reader.max.pending.tasks = 100
3260 remote.log.reader.threads = 10
3261 remote.log.storage.manager.class.name = null
3262 remote.log.storage.manager.class.path = null
3263 remote.log.storage.manager.impl.prefix = rsm.config.
3264 remote.log.storage.system.enable = false
3265 replica.fetch.backoff.ms = 1000
3266 replica.fetch.max.bytes = 1048576
3267 replica.fetch.min.bytes = 1
3268 replica.fetch.response.max.bytes = 10485760
3269 replica.fetch.wait.max.ms = 500
3270 replica.high.watermark.checkpoint.interval.ms = 5000
3271 replica.lag.time.max.ms = 30000
3272 replica.selector.class = null
3273 replica.socket.receive.buffer.bytes = 65536
3274 replica.socket.timeout.ms = 30000
3275 replication.quota.window.num = 11
3276 replication.quota.window.size.seconds = 1
3277 request.timeout.ms = 30000
3278 sasl.client.callback.handler.class = null
3279 sasl.enabled.mechanisms = [GSSAPI]
3280 sasl.jaas.config = null
3281 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3282 sasl.kerberos.min.time.before.relogin = 60000
3283 sasl.kerberos.principal.to.local.rules = [DEFAULT]
3284 sasl.kerberos.service.name = null
3285 sasl.kerberos.ticket.renew.jitter = 0.05
3286 sasl.kerberos.ticket.renew.window.factor = 0.8
3287 sasl.login.callback.handler.class = null
3288 sasl.login.class = null
3289 sasl.login.connect.timeout.ms = null
3290 sasl.login.read.timeout.ms = null
3291 sasl.login.refresh.buffer.seconds = 300
3292 sasl.login.refresh.min.period.seconds = 60
3293 sasl.login.refresh.window.factor = 0.8
3294 sasl.login.refresh.window.jitter = 0.05
3295 sasl.login.retry.backoff.max.ms = 10000
3296 sasl.login.retry.backoff.ms = 100
3297 sasl.mechanism.controller.protocol = GSSAPI
3298 sasl.mechanism.inter.broker.protocol = GSSAPI
3299 sasl.oauthbearer.assertion.algorithm = RS256
3300 sasl.oauthbearer.assertion.claim.aud = null
3301 sasl.oauthbearer.assertion.claim.exp.seconds = 300
3302 sasl.oauthbearer.assertion.claim.iss = null
3303 sasl.oauthbearer.assertion.claim.jti.include = false
3304 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
3305 sasl.oauthbearer.assertion.claim.sub = null
3306 sasl.oauthbearer.assertion.file = null
3307 sasl.oauthbearer.assertion.private.key.file = null
3308 sasl.oauthbearer.assertion.private.key.passphrase = null
3309 sasl.oauthbearer.assertion.template.file = null
3310 sasl.oauthbearer.client.credentials.client.id = null
3311 sasl.oauthbearer.client.credentials.client.secret = null
3312 sasl.oauthbearer.clock.skew.seconds = 30
3313 sasl.oauthbearer.expected.audience = null
3314 sasl.oauthbearer.expected.issuer = null
3315 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
3316 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
3317 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
3318 sasl.oauthbearer.jwks.endpoint.url = null
3319 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
3320 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
3321 sasl.oauthbearer.scope = null
3322 sasl.oauthbearer.scope.claim.name = scope
3323 sasl.oauthbearer.sub.claim.name = sub
3324 sasl.oauthbearer.token.endpoint.url = null
3325 sasl.server.callback.handler.class = null
3326 sasl.server.max.receive.size = 524288
3327 security.inter.broker.protocol = PLAINTEXT
3328 security.providers = null
3329 server.max.startup.time.ms = 9223372036854775807
3330 share.coordinator.append.linger.ms = 5
3331 share.coordinator.cold.partition.snapshot.interval.ms = 300000
3332 share.coordinator.load.buffer.size = 5242880
3333 share.coordinator.snapshot.update.records.per.snapshot = 500
3334 share.coordinator.state.topic.compression.codec = 0
3335 share.coordinator.state.topic.min.isr = 2
3336 share.coordinator.state.topic.num.partitions = 50
3337 share.coordinator.state.topic.prune.interval.ms = 300000
3338 share.coordinator.state.topic.replication.factor = 3
3339 share.coordinator.state.topic.segment.bytes = 104857600
3340 share.coordinator.threads = 1
3341 share.coordinator.write.timeout.ms = 5000
3342 share.fetch.purgatory.purge.interval.requests = 1000
3343 socket.connection.setup.timeout.max.ms = 30000
3344 socket.connection.setup.timeout.ms = 10000
3345 socket.listen.backlog.size = 50
3346 socket.receive.buffer.bytes = 102400
3347 socket.request.max.bytes = 104857600
3348 socket.send.buffer.bytes = 102400
3349 ssl.allow.dn.changes = false
3350 ssl.allow.san.changes = false
3351 ssl.cipher.suites = []
3352 ssl.client.auth = none
3353 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
3354 ssl.endpoint.identification.algorithm = https
3355 ssl.engine.factory.class = null
3356 ssl.key.password = null
3357 ssl.keymanager.algorithm = SunX509
3358 ssl.keystore.certificate.chain = null
3359 ssl.keystore.key = null
3360 ssl.keystore.location = null
3361 ssl.keystore.password = null
3362 ssl.keystore.type = JKS
3363 ssl.principal.mapping.rules = DEFAULT
3364 ssl.protocol = TLSv1.3
3365 ssl.provider = null
3366 ssl.secure.random.implementation = null
3367 ssl.trustmanager.algorithm = PKIX
3368 ssl.truststore.certificates = null
3369 ssl.truststore.location = null
3370 ssl.truststore.password = null
3371 ssl.truststore.type = JKS
3372 telemetry.max.bytes = 1048576
3373 transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
3374 transaction.max.timeout.ms = 900000
3375 transaction.partition.verification.enable = true
3376 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
3377 transaction.state.log.load.buffer.size = 5242880
3378 transaction.state.log.min.isr = 1
3379 transaction.state.log.num.partitions = 50
3380 transaction.state.log.replication.factor = 1
3381 transaction.state.log.segment.bytes = 104857600
3382 transaction.two.phase.commit.enable = false
3383 transactional.id.expiration.ms = 604800000
3384 unclean.leader.election.enable = false
3385 unclean.leader.election.interval.ms = 300000
3386 unstable.api.versions.enable = false
3387 unstable.feature.versions.enable = false
3388
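The KafkaConfig dump above is almost entirely stock defaults; the values that actually describe this test broker are the combined roles, the two localhost listeners, the single-voter quorum, the generated temp log dir, and the replication-factor-1 internal topics. A minimal sketch of just those settings as broker properties, reconstructed only from the dump above:
----
import java.util.Properties

// Non-default broker settings visible in the KafkaConfig dump above;
// everything else is left at its documented default.
val brokerProps = new Properties()
brokerProps.put("process.roles", "broker,controller")
brokerProps.put("node.id", "0")
brokerProps.put("listeners", "BROKER://localhost:6001,CONTROLLER://localhost:6002")
brokerProps.put("advertised.listeners", "BROKER://localhost:6001")
brokerProps.put("listener.security.protocol.map", "BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT")
brokerProps.put("inter.broker.listener.name", "BROKER")
brokerProps.put("controller.listener.names", "CONTROLLER")
brokerProps.put("controller.quorum.voters", "0@localhost:6002")
brokerProps.put("log.dirs", "/tmp/kafka-logs4345019044203235659") // generated per test run
brokerProps.put("offsets.topic.replication.factor", "1")
brokerProps.put("transaction.state.log.replication.factor", "1")
----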
338912:05:10.411 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for the broker to be unfenced
339012:05:10.414 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] The request from broker 0 to unfence has been granted because it has caught up with the offset of its register broker record 5.
339112:05:10.419 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=-1, inControlledShutdown=0, logDirs=[])
339212:05:10.444 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker has been unfenced. Transitioning from RECOVERY to RUNNING.
339312:05:10.444 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for the broker to be unfenced
339412:05:10.445 [pool-67-thread-1] INFO o.a.k.s.n.EndpointReadyFutures - authorizerStart completed for endpoint BROKER. Endpoint is now READY.
339512:05:10.445 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Enabling request processing.
339612:05:10.446 [pool-67-thread-1] INFO k.n.DataPlaneAcceptor - Awaiting socket connections on localhost:6001.
339712:05:10.447 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for all of the authorizer futures to be completed
339812:05:10.447 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for all of the authorizer futures to be completed
339912:05:10.447 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Waiting for all of the SocketServer Acceptors to be started
340012:05:10.447 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Finished waiting for all of the SocketServer Acceptors to be started
340112:05:10.447 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from STARTING to STARTED
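The SHUTDOWN→STARTING→STARTED transitions above complete an in-process, single-node KRaft broker. The 6001/6002 port pair matches the defaults of io.github.embeddedkafka, which this KafkaTest appears to use; a minimal sketch under that assumption (the EmbeddedKafkaConfig parameter names below are assumed from embedded-kafka's KRaft-era API and are not confirmed by this log):
----
import io.github.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}

// Assumption: embedded-kafka 4.x (KRaft); kafkaPort/controllerPort mirror the
// BROKER://localhost:6001 and CONTROLLER://localhost:6002 listeners logged above.
implicit val kafkaConfig: EmbeddedKafkaConfig =
  EmbeddedKafkaConfig(kafkaPort = 6001, controllerPort = 6002)

EmbeddedKafka.start() // brings up the combined broker+controller seen in this log
try {
  // ... run producers/consumers against localhost:6001 ...
} finally EmbeddedKafka.stop()
----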
340212:05:10.470 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
3403 acks = -1
3404 batch.size = 16384
3405 bootstrap.servers = [localhost:6001]
3406 buffer.memory = 33554432
3407 client.dns.lookup = use_all_dns_ips
3408 client.id = producer-1
3409 compression.gzip.level = -1
3410 compression.lz4.level = 9
3411 compression.type = none
3412 compression.zstd.level = 3
3413 connections.max.idle.ms = 540000
3414 delivery.timeout.ms = 120000
3415 enable.idempotence = true
3416 enable.metrics.push = true
3417 interceptor.classes = []
3418 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
3419 linger.ms = 5
3420 max.block.ms = 10000
3421 max.in.flight.requests.per.connection = 5
3422 max.request.size = 1048576
3423 metadata.max.age.ms = 300000
3424 metadata.max.idle.ms = 300000
3425 metadata.recovery.rebootstrap.trigger.ms = 300000
3426 metadata.recovery.strategy = rebootstrap
3427 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3428 metrics.num.samples = 2
3429 metrics.recording.level = INFO
3430 metrics.sample.window.ms = 30000
3431 partitioner.adaptive.partitioning.enable = true
3432 partitioner.availability.timeout.ms = 0
3433 partitioner.class = null
3434 partitioner.ignore.keys = false
3435 receive.buffer.bytes = 32768
3436 reconnect.backoff.max.ms = 1000
3437 reconnect.backoff.ms = 50
3438 request.timeout.ms = 30000
3439 retries = 2147483647
3440 retry.backoff.max.ms = 1000
3441 retry.backoff.ms = 1000
3442 sasl.client.callback.handler.class = null
3443 sasl.jaas.config = null
3444 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3445 sasl.kerberos.min.time.before.relogin = 60000
3446 sasl.kerberos.service.name = null
3447 sasl.kerberos.ticket.renew.jitter = 0.05
3448 sasl.kerberos.ticket.renew.window.factor = 0.8
3449 sasl.login.callback.handler.class = null
3450 sasl.login.class = null
3451 sasl.login.connect.timeout.ms = null
3452 sasl.login.read.timeout.ms = null
3453 sasl.login.refresh.buffer.seconds = 300
3454 sasl.login.refresh.min.period.seconds = 60
3455 sasl.login.refresh.window.factor = 0.8
3456 sasl.login.refresh.window.jitter = 0.05
3457 sasl.login.retry.backoff.max.ms = 10000
3458 sasl.login.retry.backoff.ms = 100
3459 sasl.mechanism = GSSAPI
3460 sasl.oauthbearer.assertion.algorithm = RS256
3461 sasl.oauthbearer.assertion.claim.aud = null
3462 sasl.oauthbearer.assertion.claim.exp.seconds = 300
3463 sasl.oauthbearer.assertion.claim.iss = null
3464 sasl.oauthbearer.assertion.claim.jti.include = false
3465 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
3466 sasl.oauthbearer.assertion.claim.sub = null
3467 sasl.oauthbearer.assertion.file = null
3468 sasl.oauthbearer.assertion.private.key.file = null
3469 sasl.oauthbearer.assertion.private.key.passphrase = null
3470 sasl.oauthbearer.assertion.template.file = null
3471 sasl.oauthbearer.client.credentials.client.id = null
3472 sasl.oauthbearer.client.credentials.client.secret = null
3473 sasl.oauthbearer.clock.skew.seconds = 30
3474 sasl.oauthbearer.expected.audience = null
3475 sasl.oauthbearer.expected.issuer = null
3476 sasl.oauthbearer.header.urlencode = false
3477 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
3478 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
3479 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
3480 sasl.oauthbearer.jwks.endpoint.url = null
3481 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
3482 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
3483 sasl.oauthbearer.scope = null
3484 sasl.oauthbearer.scope.claim.name = scope
3485 sasl.oauthbearer.sub.claim.name = sub
3486 sasl.oauthbearer.token.endpoint.url = null
3487 security.protocol = PLAINTEXT
3488 security.providers = null
3489 send.buffer.bytes = 131072
3490 socket.connection.setup.timeout.max.ms = 30000
3491 socket.connection.setup.timeout.ms = 10000
3492 ssl.cipher.suites = null
3493 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
3494 ssl.endpoint.identification.algorithm = https
3495 ssl.engine.factory.class = null
3496 ssl.key.password = null
3497 ssl.keymanager.algorithm = SunX509
3498 ssl.keystore.certificate.chain = null
3499 ssl.keystore.key = null
3500 ssl.keystore.location = null
3501 ssl.keystore.password = null
3502 ssl.keystore.type = JKS
3503 ssl.protocol = TLSv1.3
3504 ssl.provider = null
3505 ssl.secure.random.implementation = null
3506 ssl.trustmanager.algorithm = PKIX
3507 ssl.truststore.certificates = null
3508 ssl.truststore.location = null
3509 ssl.truststore.password = null
3510 ssl.truststore.type = JKS
3511 transaction.timeout.ms = 60000
3512 transaction.two.phase.commit.enable = false
3513 transactional.id = null
3514 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
3515
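Relative to stock client defaults, the ProducerConfig dump above pins bootstrap.servers, the String serializers, linger.ms=5, max.block.ms=10000 and retry.backoff.ms=1000; enable.idempotence=true makes the client negotiate a producer id (visible below as "ProducerId set to 0 with epoch 0"). A minimal equivalent with the plain kafka-clients API — the topic name t1 is taken from the auto-creation request below, and the record key/value are placeholders:
----
import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
props.put(ProducerConfig.LINGER_MS_CONFIG, "5")
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000")
props.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, "1000")
// enable.idempotence is left at true, as in the dump above

val producer = new KafkaProducer[String, String](props)
try producer.send(new ProducerRecord("t1", "key", "value")).get() // block until acked
finally producer.close()
----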
351612:05:10.494 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
351712:05:10.502 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-1] Instantiated an idempotent producer.
351812:05:10.516 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
351912:05:10.516 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
352012:05:10.517 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327910516
352112:05:10.532 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t1) to the active controller.
352212:05:10.537 [kafka-producer-network-thread | producer-1] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-1] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t1=UNKNOWN_TOPIC_OR_PARTITION}
352312:05:10.538 [kafka-producer-network-thread | producer-1] INFO o.a.k.c.Metadata - [Producer clientId=producer-1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
352412:05:10.547 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
352512:05:10.547 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t1 with topic ID T83WPCCgQfqNgwJc-g_peg.
352612:05:10.550 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t1-0 with topic ID T83WPCCgQfqNgwJc-g_peg and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
352712:05:10.576 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
352812:05:10.577 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t1-0)
352912:05:10.580 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t1-0 with topic id T83WPCCgQfqNgwJc-g_peg.
353012:05:10.580 [quorum-controller-0-event-handler] INFO o.a.k.c.ProducerIdControlManager - [QuorumController id=0] Replaying ProducerIdsRecord ProducerIdsRecord(brokerId=0, brokerEpoch=5, nextProducerId=1000)
353112:05:10.590 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t1-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
353212:05:10.592 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t1-0 in /tmp/kafka-logs4345019044203235659/t1-0 with properties {}
353312:05:10.592 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t1-0 broker=0] No checkpointed highwatermark is found for partition t1-0
353412:05:10.594 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t1-0 broker=0] Log loaded for partition t1-0 with initial high watermark 0
353512:05:10.599 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t1-0 with topic id Some(T83WPCCgQfqNgwJc-g_peg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
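The UNKNOWN_TOPIC_OR_PARTITION warning above is the usual race with auto.create.topics.enable=true: the producer's first metadata request triggers auto-creation of t1 but is answered before the TopicRecord/PartitionRecord above are applied, then succeeds on retry. One way to avoid the warning is to pre-create the topic with the Admin client; a minimal sketch matching the CreatableTopic above (one partition, replication factor 1):
----
import java.util.Properties
import scala.jdk.CollectionConverters._
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}

val adminProps = new Properties()
adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")

val admin = Admin.create(adminProps)
try {
  // one partition, replication factor 1, as in the CreateTopics result above
  admin.createTopics(List(new NewTopic("t1", 1, 1.toShort)).asJava).all().get()
} finally admin.close()
----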
353612:05:11.543 [kafka-producer-network-thread | producer-1] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-1] ProducerId set to 0 with epoch 0
353712:05:11.576 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-1] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
353812:05:11.582 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
353912:05:11.583 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
354012:05:11.583 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
354112:05:11.583 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
354212:05:11.584 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-1 unregistered
354312:05:11.584 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
3544 acks = -1
3545 batch.size = 16384
3546 bootstrap.servers = [localhost:6001]
3547 buffer.memory = 33554432
3548 client.dns.lookup = use_all_dns_ips
3549 client.id = producer-2
3550 compression.gzip.level = -1
3551 compression.lz4.level = 9
3552 compression.type = none
3553 compression.zstd.level = 3
3554 connections.max.idle.ms = 540000
3555 delivery.timeout.ms = 120000
3556 enable.idempotence = true
3557 enable.metrics.push = true
3558 interceptor.classes = []
3559 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
3560 linger.ms = 5
3561 max.block.ms = 10000
3562 max.in.flight.requests.per.connection = 5
3563 max.request.size = 1048576
3564 metadata.max.age.ms = 300000
3565 metadata.max.idle.ms = 300000
3566 metadata.recovery.rebootstrap.trigger.ms = 300000
3567 metadata.recovery.strategy = rebootstrap
3568 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3569 metrics.num.samples = 2
3570 metrics.recording.level = INFO
3571 metrics.sample.window.ms = 30000
3572 partitioner.adaptive.partitioning.enable = true
3573 partitioner.availability.timeout.ms = 0
3574 partitioner.class = null
3575 partitioner.ignore.keys = false
3576 receive.buffer.bytes = 32768
3577 reconnect.backoff.max.ms = 1000
3578 reconnect.backoff.ms = 50
3579 request.timeout.ms = 30000
3580 retries = 2147483647
3581 retry.backoff.max.ms = 1000
3582 retry.backoff.ms = 1000
3583 sasl.client.callback.handler.class = null
3584 sasl.jaas.config = null
3585 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3586 sasl.kerberos.min.time.before.relogin = 60000
3587 sasl.kerberos.service.name = null
3588 sasl.kerberos.ticket.renew.jitter = 0.05
3589 sasl.kerberos.ticket.renew.window.factor = 0.8
3590 sasl.login.callback.handler.class = null
3591 sasl.login.class = null
3592 sasl.login.connect.timeout.ms = null
3593 sasl.login.read.timeout.ms = null
3594 sasl.login.refresh.buffer.seconds = 300
3595 sasl.login.refresh.min.period.seconds = 60
3596 sasl.login.refresh.window.factor = 0.8
3597 sasl.login.refresh.window.jitter = 0.05
3598 sasl.login.retry.backoff.max.ms = 10000
3599 sasl.login.retry.backoff.ms = 100
3600 sasl.mechanism = GSSAPI
3601 sasl.oauthbearer.assertion.algorithm = RS256
3602 sasl.oauthbearer.assertion.claim.aud = null
3603 sasl.oauthbearer.assertion.claim.exp.seconds = 300
3604 sasl.oauthbearer.assertion.claim.iss = null
3605 sasl.oauthbearer.assertion.claim.jti.include = false
3606 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
3607 sasl.oauthbearer.assertion.claim.sub = null
3608 sasl.oauthbearer.assertion.file = null
3609 sasl.oauthbearer.assertion.private.key.file = null
3610 sasl.oauthbearer.assertion.private.key.passphrase = null
3611 sasl.oauthbearer.assertion.template.file = null
3612 sasl.oauthbearer.client.credentials.client.id = null
3613 sasl.oauthbearer.client.credentials.client.secret = null
3614 sasl.oauthbearer.clock.skew.seconds = 30
3615 sasl.oauthbearer.expected.audience = null
3616 sasl.oauthbearer.expected.issuer = null
3617 sasl.oauthbearer.header.urlencode = false
3618 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
3619 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
3620 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
3621 sasl.oauthbearer.jwks.endpoint.url = null
3622 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
3623 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
3624 sasl.oauthbearer.scope = null
3625 sasl.oauthbearer.scope.claim.name = scope
3626 sasl.oauthbearer.sub.claim.name = sub
3627 sasl.oauthbearer.token.endpoint.url = null
3628 security.protocol = PLAINTEXT
3629 security.providers = null
3630 send.buffer.bytes = 131072
3631 socket.connection.setup.timeout.max.ms = 30000
3632 socket.connection.setup.timeout.ms = 10000
3633 ssl.cipher.suites = null
3634 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
3635 ssl.endpoint.identification.algorithm = https
3636 ssl.engine.factory.class = null
3637 ssl.key.password = null
3638 ssl.keymanager.algorithm = SunX509
3639 ssl.keystore.certificate.chain = null
3640 ssl.keystore.key = null
3641 ssl.keystore.location = null
3642 ssl.keystore.password = null
3643 ssl.keystore.type = JKS
3644 ssl.protocol = TLSv1.3
3645 ssl.provider = null
3646 ssl.secure.random.implementation = null
3647 ssl.trustmanager.algorithm = PKIX
3648 ssl.truststore.certificates = null
3649 ssl.truststore.location = null
3650 ssl.truststore.password = null
3651 ssl.truststore.type = JKS
3652 transaction.timeout.ms = 60000
3653 transaction.two.phase.commit.enable = false
3654 transactional.id = null
3655 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
3656
365712:05:11.585 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
365812:05:11.585 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-2] Instantiated an idempotent producer.
365912:05:11.591 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
366012:05:11.592 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
366112:05:11.592 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327911591
366212:05:11.597 [kafka-producer-network-thread | producer-2] INFO o.a.k.c.Metadata - [Producer clientId=producer-2] Cluster ID: cERjULLDRBGv7lPJWPu8sA
366312:05:11.598 [kafka-producer-network-thread | producer-2] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-2] ProducerId set to 1 with epoch 0
366412:05:11.607 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-2] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
366512:05:11.611 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
366612:05:11.611 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
366712:05:11.611 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
366812:05:11.611 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
366912:05:11.611 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-2 unregistered
367012:05:11.612 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
3671 acks = -1
3672 batch.size = 16384
3673 bootstrap.servers = [localhost:6001]
3674 buffer.memory = 33554432
3675 client.dns.lookup = use_all_dns_ips
3676 client.id = producer-3
3677 compression.gzip.level = -1
3678 compression.lz4.level = 9
3679 compression.type = none
3680 compression.zstd.level = 3
3681 connections.max.idle.ms = 540000
3682 delivery.timeout.ms = 120000
3683 enable.idempotence = true
3684 enable.metrics.push = true
3685 interceptor.classes = []
3686 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
3687 linger.ms = 5
3688 max.block.ms = 10000
3689 max.in.flight.requests.per.connection = 5
3690 max.request.size = 1048576
3691 metadata.max.age.ms = 300000
3692 metadata.max.idle.ms = 300000
3693 metadata.recovery.rebootstrap.trigger.ms = 300000
3694 metadata.recovery.strategy = rebootstrap
3695 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3696 metrics.num.samples = 2
3697 metrics.recording.level = INFO
3698 metrics.sample.window.ms = 30000
3699 partitioner.adaptive.partitioning.enable = true
3700 partitioner.availability.timeout.ms = 0
3701 partitioner.class = null
3702 partitioner.ignore.keys = false
3703 receive.buffer.bytes = 32768
3704 reconnect.backoff.max.ms = 1000
3705 reconnect.backoff.ms = 50
3706 request.timeout.ms = 30000
3707 retries = 2147483647
3708 retry.backoff.max.ms = 1000
3709 retry.backoff.ms = 1000
3710 sasl.client.callback.handler.class = null
3711 sasl.jaas.config = null
3712 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3713 sasl.kerberos.min.time.before.relogin = 60000
3714 sasl.kerberos.service.name = null
3715 sasl.kerberos.ticket.renew.jitter = 0.05
3716 sasl.kerberos.ticket.renew.window.factor = 0.8
3717 sasl.login.callback.handler.class = null
3718 sasl.login.class = null
3719 sasl.login.connect.timeout.ms = null
3720 sasl.login.read.timeout.ms = null
3721 sasl.login.refresh.buffer.seconds = 300
3722 sasl.login.refresh.min.period.seconds = 60
3723 sasl.login.refresh.window.factor = 0.8
3724 sasl.login.refresh.window.jitter = 0.05
3725 sasl.login.retry.backoff.max.ms = 10000
3726 sasl.login.retry.backoff.ms = 100
3727 sasl.mechanism = GSSAPI
3728 sasl.oauthbearer.assertion.algorithm = RS256
3729 sasl.oauthbearer.assertion.claim.aud = null
3730 sasl.oauthbearer.assertion.claim.exp.seconds = 300
3731 sasl.oauthbearer.assertion.claim.iss = null
3732 sasl.oauthbearer.assertion.claim.jti.include = false
3733 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
3734 sasl.oauthbearer.assertion.claim.sub = null
3735 sasl.oauthbearer.assertion.file = null
3736 sasl.oauthbearer.assertion.private.key.file = null
3737 sasl.oauthbearer.assertion.private.key.passphrase = null
3738 sasl.oauthbearer.assertion.template.file = null
3739 sasl.oauthbearer.client.credentials.client.id = null
3740 sasl.oauthbearer.client.credentials.client.secret = null
3741 sasl.oauthbearer.clock.skew.seconds = 30
3742 sasl.oauthbearer.expected.audience = null
3743 sasl.oauthbearer.expected.issuer = null
3744 sasl.oauthbearer.header.urlencode = false
3745 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
3746 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
3747 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
3748 sasl.oauthbearer.jwks.endpoint.url = null
3749 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
3750 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
3751 sasl.oauthbearer.scope = null
3752 sasl.oauthbearer.scope.claim.name = scope
3753 sasl.oauthbearer.sub.claim.name = sub
3754 sasl.oauthbearer.token.endpoint.url = null
3755 security.protocol = PLAINTEXT
3756 security.providers = null
3757 send.buffer.bytes = 131072
3758 socket.connection.setup.timeout.max.ms = 30000
3759 socket.connection.setup.timeout.ms = 10000
3760 ssl.cipher.suites = null
3761 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
3762 ssl.endpoint.identification.algorithm = https
3763 ssl.engine.factory.class = null
3764 ssl.key.password = null
3765 ssl.keymanager.algorithm = SunX509
3766 ssl.keystore.certificate.chain = null
3767 ssl.keystore.key = null
3768 ssl.keystore.location = null
3769 ssl.keystore.password = null
3770 ssl.keystore.type = JKS
3771 ssl.protocol = TLSv1.3
3772 ssl.provider = null
3773 ssl.secure.random.implementation = null
3774 ssl.trustmanager.algorithm = PKIX
3775 ssl.truststore.certificates = null
3776 ssl.truststore.location = null
3777 ssl.truststore.password = null
3778 ssl.truststore.type = JKS
3779 transaction.timeout.ms = 60000
3780 transaction.two.phase.commit.enable = false
3781 transactional.id = null
3782 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
3783
378412:05:11.612 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
378512:05:11.613 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-3] Instantiated an idempotent producer.
378612:05:11.616 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
378712:05:11.616 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
378812:05:11.616 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327911616
378912:05:11.620 [kafka-producer-network-thread | producer-3] INFO o.a.k.c.Metadata - [Producer clientId=producer-3] Cluster ID: cERjULLDRBGv7lPJWPu8sA
379012:05:11.621 [kafka-producer-network-thread | producer-3] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-3] ProducerId set to 2 with epoch 0
379112:05:11.632 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-3] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
379212:05:11.635 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
379312:05:11.635 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
379412:05:11.636 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
379512:05:11.636 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
379612:05:11.636 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-3 unregistered
379712:05:11.654 [virtual-611] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
3798 allow.auto.create.topics = true
3799 auto.commit.interval.ms = 5000
3800 auto.offset.reset = earliest
3801 bootstrap.servers = [localhost:6001]
3802 check.crcs = true
3803 client.dns.lookup = use_all_dns_ips
3804 client.id = consumer-g1-1
3805 client.rack =
3806 connections.max.idle.ms = 540000
3807 default.api.timeout.ms = 60000
3808 enable.auto.commit = false
3809 enable.metrics.push = true
3810 exclude.internal.topics = true
3811 fetch.max.bytes = 52428800
3812 fetch.max.wait.ms = 500
3813 fetch.min.bytes = 1
3814 group.id = g1
3815 group.instance.id = null
3816 group.protocol = classic
3817 group.remote.assignor = null
3818 heartbeat.interval.ms = 3000
3819 interceptor.classes = []
3820 internal.leave.group.on.close = true
3821 internal.throw.on.fetch.stable.offset.unsupported = false
3822 isolation.level = read_uncommitted
3823 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
3824 max.partition.fetch.bytes = 1048576
3825 max.poll.interval.ms = 300000
3826 max.poll.records = 500
3827 metadata.max.age.ms = 300000
3828 metadata.recovery.rebootstrap.trigger.ms = 300000
3829 metadata.recovery.strategy = rebootstrap
3830 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3831 metrics.num.samples = 2
3832 metrics.recording.level = INFO
3833 metrics.sample.window.ms = 30000
3834 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
3835 receive.buffer.bytes = 65536
3836 reconnect.backoff.max.ms = 1000
3837 reconnect.backoff.ms = 50
3838 request.timeout.ms = 30000
3839 retry.backoff.max.ms = 1000
3840 retry.backoff.ms = 100
3841 sasl.client.callback.handler.class = null
3842 sasl.jaas.config = null
3843 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3844 sasl.kerberos.min.time.before.relogin = 60000
3845 sasl.kerberos.service.name = null
3846 sasl.kerberos.ticket.renew.jitter = 0.05
3847 sasl.kerberos.ticket.renew.window.factor = 0.8
3848 sasl.login.callback.handler.class = null
3849 sasl.login.class = null
3850 sasl.login.connect.timeout.ms = null
3851 sasl.login.read.timeout.ms = null
3852 sasl.login.refresh.buffer.seconds = 300
3853 sasl.login.refresh.min.period.seconds = 60
3854 sasl.login.refresh.window.factor = 0.8
3855 sasl.login.refresh.window.jitter = 0.05
3856 sasl.login.retry.backoff.max.ms = 10000
3857 sasl.login.retry.backoff.ms = 100
3858 sasl.mechanism = GSSAPI
3859 sasl.oauthbearer.assertion.algorithm = RS256
3860 sasl.oauthbearer.assertion.claim.aud = null
3861 sasl.oauthbearer.assertion.claim.exp.seconds = 300
3862 sasl.oauthbearer.assertion.claim.iss = null
3863 sasl.oauthbearer.assertion.claim.jti.include = false
3864 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
3865 sasl.oauthbearer.assertion.claim.sub = null
3866 sasl.oauthbearer.assertion.file = null
3867 sasl.oauthbearer.assertion.private.key.file = null
3868 sasl.oauthbearer.assertion.private.key.passphrase = null
3869 sasl.oauthbearer.assertion.template.file = null
3870 sasl.oauthbearer.client.credentials.client.id = null
3871 sasl.oauthbearer.client.credentials.client.secret = null
3872 sasl.oauthbearer.clock.skew.seconds = 30
3873 sasl.oauthbearer.expected.audience = null
3874 sasl.oauthbearer.expected.issuer = null
3875 sasl.oauthbearer.header.urlencode = false
3876 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
3877 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
3878 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
3879 sasl.oauthbearer.jwks.endpoint.url = null
3880 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
3881 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
3882 sasl.oauthbearer.scope = null
3883 sasl.oauthbearer.scope.claim.name = scope
3884 sasl.oauthbearer.sub.claim.name = sub
3885 sasl.oauthbearer.token.endpoint.url = null
3886 security.protocol = PLAINTEXT
3887 security.providers = null
3888 send.buffer.bytes = 131072
3889 session.timeout.ms = 45000
3890 share.acknowledgement.mode = implicit
3891 socket.connection.setup.timeout.max.ms = 30000
3892 socket.connection.setup.timeout.ms = 10000
3893 ssl.cipher.suites = null
3894 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
3895 ssl.endpoint.identification.algorithm = https
3896 ssl.engine.factory.class = null
3897 ssl.key.password = null
3898 ssl.keymanager.algorithm = SunX509
3899 ssl.keystore.certificate.chain = null
3900 ssl.keystore.key = null
3901 ssl.keystore.location = null
3902 ssl.keystore.password = null
3903 ssl.keystore.type = JKS
3904 ssl.protocol = TLSv1.3
3905 ssl.provider = null
3906 ssl.secure.random.implementation = null
3907 ssl.trustmanager.algorithm = PKIX
3908 ssl.truststore.certificates = null
3909 ssl.truststore.location = null
3910 ssl.truststore.password = null
3911 ssl.truststore.type = JKS
3912 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
3913
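The ConsumerConfig dump above belongs to the test consumer in group g1: auto.offset.reset = earliest, enable.auto.commit = false (so offsets are committed explicitly), String deserializers, classic group protocol. A minimal illustrative sketch of the same settings with the plain kafka-clients API (again, not the ox.kafka layer the test actually uses):

    import java.time.Duration
    import java.util.Properties
    import scala.jdk.CollectionConverters.*
    import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
    import org.apache.kafka.common.serialization.StringDeserializer

    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "g1")
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")  // as in the dump
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")    // manual commits
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(java.util.List.of("t1"))
    consumer.poll(Duration.ofSeconds(1)).asScala
      .foreach(r => println(s"${r.topic}-${r.partition}@${r.offset}: ${r.value}"))
    consumer.commitSync() // explicit commit, since auto-commit is disabled
    consumer.close()
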
391412:05:11.664 [virtual-611] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
391512:05:11.697 [virtual-611] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
391612:05:11.697 [virtual-611] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
391712:05:11.697 [virtual-611] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327911697
391812:05:11.719 [virtual-618] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g1-1, groupId=g1] Subscribed to topic(s): t1
391912:05:11.725 [virtual-618] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g1-1, groupId=g1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
392012:05:11.726 [data-plane-kafka-request-handler-7] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(__consumer_offsets) to the active controller.
392112:05:11.729 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='__consumer_offsets', numPartitions=1, replicationFactor=1, assignments=[], configs=[CreatableTopicConfig(name='compression.type', value='producer'), CreatableTopicConfig(name='cleanup.policy', value='compact'), CreatableTopicConfig(name='segment.bytes', value='104857600')]): SUCCESS
392212:05:11.730 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic __consumer_offsets with topic ID mgl9ca4oSYGuL126573dhw.
392312:05:11.730 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration compression.type to producer
392412:05:11.730 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration cleanup.policy to compact
392512:05:11.731 [quorum-controller-0-event-handler] INFO o.a.k.c.ConfigurationControlManager - [QuorumController id=0] Replayed ConfigRecord for ConfigResource(type=TOPIC, name='__consumer_offsets') which set configuration segment.bytes to 104857600
392612:05:11.731 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition __consumer_offsets-0 with topic ID mgl9ca4oSYGuL126573dhw and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
392712:05:11.757 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
392812:05:11.758 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(__consumer_offsets-0)
392912:05:11.758 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition __consumer_offsets-0 with topic id mgl9ca4oSYGuL126573dhw.
393012:05:11.760 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=__consumer_offsets-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
393112:05:11.760 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition __consumer_offsets-0 in /tmp/kafka-logs4345019044203235659/__consumer_offsets-0 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600}
393212:05:11.761 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition __consumer_offsets-0 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-0
393312:05:11.761 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition __consumer_offsets-0 broker=0] Log loaded for partition __consumer_offsets-0 with initial high watermark 0
393412:05:11.761 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader __consumer_offsets-0 with topic id Some(mgl9ca4oSYGuL126573dhw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
393512:05:11.764 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Scheduling loading of metadata from __consumer_offsets-0 with epoch 0
393612:05:11.771 [kafka-0-metadata-loader-event-handler] INFO k.s.m.DynamicConfigPublisher - [DynamicConfigPublisher broker id=0] Updating topic __consumer_offsets with new configuration : compression.type -> producer,cleanup.policy -> compact,segment.bytes -> 104857600
393712:05:11.784 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Finished loading of metadata from __consumer_offsets-0 with epoch 0 in 2ms where 1ms was spent in the scheduler. Loaded 0 records which total to 0 bytes.
393812:05:11.838 [virtual-618] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
393912:05:11.839 [virtual-618] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] (Re-)joining group
394012:05:11.849 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g1 in Empty state. Created a new member id consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b and requesting the member to rejoin with this id.
394112:05:11.850 [virtual-618] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Request joining group due to: need to re-join with the given member-id: consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b
394212:05:11.850 [virtual-618] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] (Re-)joining group
394312:05:11.854 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b joins group g1 in Empty state. Adding to the group now.
394412:05:11.855 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b with group instance id null; client reason: need to re-join with the given member-id: consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b).
394512:05:14.858 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g1 generation 1 with 1 members.
394612:05:14.860 [virtual-618] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b', protocol='range'}
394712:05:14.865 [virtual-618] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Finished assignment for group at generation 1: {consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b=Assignment(partitions=[t1-0])}
394812:05:14.868 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b for group g1 for generation 1. The group has 1 members, 0 of which are static.
394912:05:14.876 [virtual-618] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b', protocol='range'}
395012:05:14.876 [virtual-618] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Notifying assignor about the new Assignment(partitions=[t1-0])
395112:05:14.878 [virtual-618] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g1-1, groupId=g1] Adding newly assigned partitions: [t1-0]
395212:05:14.885 [virtual-618] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Found no committed offset for partition t1-0
395312:05:14.898 [virtual-618] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g1-1, groupId=g1] Resetting offset for partition t1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
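The join dance above (member-id handshake, PreparingRebalance, Stabilized, SyncGroup, then "Adding newly assigned partitions: [t1-0]") is the classic-protocol rebalance. An application can observe those assign/revoke transitions by subscribing with a ConsumerRebalanceListener; a hypothetical sketch:

    import java.util.Collection
    import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
    import org.apache.kafka.common.TopicPartition

    val listener = new ConsumerRebalanceListener:
      // Fires at the point where the log prints "Adding newly assigned partitions".
      def onPartitionsAssigned(partitions: Collection[TopicPartition]): Unit =
        println(s"assigned: $partitions")
      // Fires at the later "Revoke previously assigned partitions" message.
      def onPartitionsRevoked(partitions: Collection[TopicPartition]): Unit =
        println(s"revoked: $partitions")
    // consumer.subscribe(java.util.List.of("t1"), listener)
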
395412:05:15.185 [virtual-611] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
3955 acks = -1
3956 batch.size = 16384
3957 bootstrap.servers = [localhost:6001]
3958 buffer.memory = 33554432
3959 client.dns.lookup = use_all_dns_ips
3960 client.id = producer-4
3961 compression.gzip.level = -1
3962 compression.lz4.level = 9
3963 compression.type = none
3964 compression.zstd.level = 3
3965 connections.max.idle.ms = 540000
3966 delivery.timeout.ms = 120000
3967 enable.idempotence = true
3968 enable.metrics.push = true
3969 interceptor.classes = []
3970 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
3971 linger.ms = 5
3972 max.block.ms = 10000
3973 max.in.flight.requests.per.connection = 5
3974 max.request.size = 1048576
3975 metadata.max.age.ms = 300000
3976 metadata.max.idle.ms = 300000
3977 metadata.recovery.rebootstrap.trigger.ms = 300000
3978 metadata.recovery.strategy = rebootstrap
3979 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
3980 metrics.num.samples = 2
3981 metrics.recording.level = INFO
3982 metrics.sample.window.ms = 30000
3983 partitioner.adaptive.partitioning.enable = true
3984 partitioner.availability.timeout.ms = 0
3985 partitioner.class = null
3986 partitioner.ignore.keys = false
3987 receive.buffer.bytes = 32768
3988 reconnect.backoff.max.ms = 1000
3989 reconnect.backoff.ms = 50
3990 request.timeout.ms = 30000
3991 retries = 2147483647
3992 retry.backoff.max.ms = 1000
3993 retry.backoff.ms = 1000
3994 sasl.client.callback.handler.class = null
3995 sasl.jaas.config = null
3996 sasl.kerberos.kinit.cmd = /usr/bin/kinit
3997 sasl.kerberos.min.time.before.relogin = 60000
3998 sasl.kerberos.service.name = null
3999 sasl.kerberos.ticket.renew.jitter = 0.05
4000 sasl.kerberos.ticket.renew.window.factor = 0.8
4001 sasl.login.callback.handler.class = null
4002 sasl.login.class = null
4003 sasl.login.connect.timeout.ms = null
4004 sasl.login.read.timeout.ms = null
4005 sasl.login.refresh.buffer.seconds = 300
4006 sasl.login.refresh.min.period.seconds = 60
4007 sasl.login.refresh.window.factor = 0.8
4008 sasl.login.refresh.window.jitter = 0.05
4009 sasl.login.retry.backoff.max.ms = 10000
4010 sasl.login.retry.backoff.ms = 100
4011 sasl.mechanism = GSSAPI
4012 sasl.oauthbearer.assertion.algorithm = RS256
4013 sasl.oauthbearer.assertion.claim.aud = null
4014 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4015 sasl.oauthbearer.assertion.claim.iss = null
4016 sasl.oauthbearer.assertion.claim.jti.include = false
4017 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4018 sasl.oauthbearer.assertion.claim.sub = null
4019 sasl.oauthbearer.assertion.file = null
4020 sasl.oauthbearer.assertion.private.key.file = null
4021 sasl.oauthbearer.assertion.private.key.passphrase = null
4022 sasl.oauthbearer.assertion.template.file = null
4023 sasl.oauthbearer.client.credentials.client.id = null
4024 sasl.oauthbearer.client.credentials.client.secret = null
4025 sasl.oauthbearer.clock.skew.seconds = 30
4026 sasl.oauthbearer.expected.audience = null
4027 sasl.oauthbearer.expected.issuer = null
4028 sasl.oauthbearer.header.urlencode = false
4029 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4030 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4031 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4032 sasl.oauthbearer.jwks.endpoint.url = null
4033 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4034 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4035 sasl.oauthbearer.scope = null
4036 sasl.oauthbearer.scope.claim.name = scope
4037 sasl.oauthbearer.sub.claim.name = sub
4038 sasl.oauthbearer.token.endpoint.url = null
4039 security.protocol = PLAINTEXT
4040 security.providers = null
4041 send.buffer.bytes = 131072
4042 socket.connection.setup.timeout.max.ms = 30000
4043 socket.connection.setup.timeout.ms = 10000
4044 ssl.cipher.suites = null
4045 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4046 ssl.endpoint.identification.algorithm = https
4047 ssl.engine.factory.class = null
4048 ssl.key.password = null
4049 ssl.keymanager.algorithm = SunX509
4050 ssl.keystore.certificate.chain = null
4051 ssl.keystore.key = null
4052 ssl.keystore.location = null
4053 ssl.keystore.password = null
4054 ssl.keystore.type = JKS
4055 ssl.protocol = TLSv1.3
4056 ssl.provider = null
4057 ssl.secure.random.implementation = null
4058 ssl.trustmanager.algorithm = PKIX
4059 ssl.truststore.certificates = null
4060 ssl.truststore.location = null
4061 ssl.truststore.password = null
4062 ssl.truststore.type = JKS
4063 transaction.timeout.ms = 60000
4064 transaction.two.phase.commit.enable = false
4065 transactional.id = null
4066 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4067
406812:05:15.185 [virtual-611] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
406912:05:15.186 [virtual-611] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-4] Instantiated an idempotent producer.
407012:05:15.189 [virtual-611] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
407112:05:15.189 [virtual-611] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
407212:05:15.189 [virtual-611] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327915189
407312:05:15.194 [kafka-producer-network-thread | producer-4] INFO o.a.k.c.Metadata - [Producer clientId=producer-4] Cluster ID: cERjULLDRBGv7lPJWPu8sA
407412:05:15.194 [kafka-producer-network-thread | producer-4] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-4] ProducerId set to 3 with epoch 0
407512:05:15.207 [virtual-611] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-4] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
407612:05:15.211 [virtual-611] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
407712:05:15.211 [virtual-611] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
407812:05:15.211 [virtual-611] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
407912:05:15.211 [virtual-611] INFO o.a.k.c.m.Metrics - Metrics reporters closed
408012:05:15.213 [virtual-611] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-4 unregistered
408112:05:15.215 [virtual-617] ERROR o.k.KafkaFlow$ - Exception when polling for records
4082java.lang.InterruptedException: null
4083 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
4084 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
4085 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
4086 at ox.channels.ActorRef.ask(actor.scala:64)
4087 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
4088 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
4089 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
4090 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
4091 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
4092 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
4093 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
4094 at scala.Function0.apply$mcV$sp(Function0.scala:45)
4095 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
4096 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
409712:05:15.215 [virtual-618] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
4098java.lang.InterruptedException: null
4099 ... 18 common frames omitted
4100Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
4101 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
4102 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
4103 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
4104 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
4105 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
4106 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
4107 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
4108 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
4109 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
4110 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
4111 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
4112 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
4113 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
4114 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
4115 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
4116 at scala.Function0.apply$mcV$sp(Function0.scala:45)
4117 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
4118 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
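The two ERROR traces above accompany scope shutdown rather than a broker problem: when the surrounding ox supervised scope winds down, it interrupts its virtual-thread forks (ThreadHerd), and KafkaConsumer.poll surfaces the interrupt as org.apache.kafka.common.errors.InterruptException wrapping java.lang.InterruptedException; the consumer then leaves the group and closes in an orderly way below. A hedged sketch of the general handling pattern (not the actual KafkaFlow implementation):

    import java.time.Duration
    import org.apache.kafka.clients.consumer.KafkaConsumer
    import org.apache.kafka.common.errors.InterruptException

    def pollLoop(consumer: KafkaConsumer[String, String]): Unit =
      try
        while !Thread.currentThread().isInterrupted do
          consumer.poll(Duration.ofMillis(100)).forEach(r => println(r.value()))
      catch
        case _: InterruptException =>
          // Kafka restores the thread's interrupt flag before throwing, so
          // simply returning lets the enclosing scope finish its cleanup.
          ()
      finally
        consumer.close()
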
411912:05:15.232 [virtual-624] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g1-1, groupId=g1] Revoke previously assigned partitions [t1-0]
412012:05:15.233 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Member consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
412112:05:15.234 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Resetting generation and member id due to: consumer pro-actively leaving the group
412212:05:15.234 [virtual-624] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g1-1, groupId=g1] Request joining group due to: consumer pro-actively leaving the group
412312:05:15.235 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g1] Member consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
412412:05:15.236 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g1-1-e7ec5a5c-3986-4ae2-ade4-be3ba7d9647b) members.).
412512:05:15.237 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g1 with generation 2 is now empty.
412612:05:15.712 [virtual-624] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
412712:05:15.712 [virtual-624] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
412812:05:15.712 [virtual-624] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
412912:05:15.712 [virtual-624] INFO o.a.k.c.m.Metrics - Metrics reporters closed
413012:05:15.715 [virtual-624] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g1-1 unregistered
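The close sequence above (revoke partitions, explicit LeaveGroup, metrics shutdown, app-info unregister) is what KafkaConsumer.close() produces. Where interrupting the polling thread is undesirable, the Kafka-documented alternative shutdown signal is consumer.wakeup() from another thread; a rough sketch, reusing the consumer from the earlier snippet:

    import java.time.Duration
    import org.apache.kafka.clients.consumer.KafkaConsumer
    import org.apache.kafka.common.errors.WakeupException

    def run(consumer: KafkaConsumer[String, String]): Unit =
      try
        while true do
          consumer.poll(Duration.ofMillis(100))
      catch
        case _: WakeupException => () // expected: another thread called wakeup()
      finally
        consumer.close() // emits the LeaveGroup request seen above
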
413112:05:15.726 [virtual-626] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4132 acks = -1
4133 batch.size = 16384
4134 bootstrap.servers = [localhost:6001]
4135 buffer.memory = 33554432
4136 client.dns.lookup = use_all_dns_ips
4137 client.id = producer-5
4138 compression.gzip.level = -1
4139 compression.lz4.level = 9
4140 compression.type = none
4141 compression.zstd.level = 3
4142 connections.max.idle.ms = 540000
4143 delivery.timeout.ms = 120000
4144 enable.idempotence = true
4145 enable.metrics.push = true
4146 interceptor.classes = []
4147 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4148 linger.ms = 5
4149 max.block.ms = 60000
4150 max.in.flight.requests.per.connection = 5
4151 max.request.size = 1048576
4152 metadata.max.age.ms = 300000
4153 metadata.max.idle.ms = 300000
4154 metadata.recovery.rebootstrap.trigger.ms = 300000
4155 metadata.recovery.strategy = rebootstrap
4156 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4157 metrics.num.samples = 2
4158 metrics.recording.level = INFO
4159 metrics.sample.window.ms = 30000
4160 partitioner.adaptive.partitioning.enable = true
4161 partitioner.availability.timeout.ms = 0
4162 partitioner.class = null
4163 partitioner.ignore.keys = false
4164 receive.buffer.bytes = 32768
4165 reconnect.backoff.max.ms = 1000
4166 reconnect.backoff.ms = 50
4167 request.timeout.ms = 30000
4168 retries = 2147483647
4169 retry.backoff.max.ms = 1000
4170 retry.backoff.ms = 100
4171 sasl.client.callback.handler.class = null
4172 sasl.jaas.config = null
4173 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4174 sasl.kerberos.min.time.before.relogin = 60000
4175 sasl.kerberos.service.name = null
4176 sasl.kerberos.ticket.renew.jitter = 0.05
4177 sasl.kerberos.ticket.renew.window.factor = 0.8
4178 sasl.login.callback.handler.class = null
4179 sasl.login.class = null
4180 sasl.login.connect.timeout.ms = null
4181 sasl.login.read.timeout.ms = null
4182 sasl.login.refresh.buffer.seconds = 300
4183 sasl.login.refresh.min.period.seconds = 60
4184 sasl.login.refresh.window.factor = 0.8
4185 sasl.login.refresh.window.jitter = 0.05
4186 sasl.login.retry.backoff.max.ms = 10000
4187 sasl.login.retry.backoff.ms = 100
4188 sasl.mechanism = GSSAPI
4189 sasl.oauthbearer.assertion.algorithm = RS256
4190 sasl.oauthbearer.assertion.claim.aud = null
4191 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4192 sasl.oauthbearer.assertion.claim.iss = null
4193 sasl.oauthbearer.assertion.claim.jti.include = false
4194 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4195 sasl.oauthbearer.assertion.claim.sub = null
4196 sasl.oauthbearer.assertion.file = null
4197 sasl.oauthbearer.assertion.private.key.file = null
4198 sasl.oauthbearer.assertion.private.key.passphrase = null
4199 sasl.oauthbearer.assertion.template.file = null
4200 sasl.oauthbearer.client.credentials.client.id = null
4201 sasl.oauthbearer.client.credentials.client.secret = null
4202 sasl.oauthbearer.clock.skew.seconds = 30
4203 sasl.oauthbearer.expected.audience = null
4204 sasl.oauthbearer.expected.issuer = null
4205 sasl.oauthbearer.header.urlencode = false
4206 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4207 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4208 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4209 sasl.oauthbearer.jwks.endpoint.url = null
4210 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4211 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4212 sasl.oauthbearer.scope = null
4213 sasl.oauthbearer.scope.claim.name = scope
4214 sasl.oauthbearer.sub.claim.name = sub
4215 sasl.oauthbearer.token.endpoint.url = null
4216 security.protocol = PLAINTEXT
4217 security.providers = null
4218 send.buffer.bytes = 131072
4219 socket.connection.setup.timeout.max.ms = 30000
4220 socket.connection.setup.timeout.ms = 10000
4221 ssl.cipher.suites = null
4222 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4223 ssl.endpoint.identification.algorithm = https
4224 ssl.engine.factory.class = null
4225 ssl.key.password = null
4226 ssl.keymanager.algorithm = SunX509
4227 ssl.keystore.certificate.chain = null
4228 ssl.keystore.key = null
4229 ssl.keystore.location = null
4230 ssl.keystore.password = null
4231 ssl.keystore.type = JKS
4232 ssl.protocol = TLSv1.3
4233 ssl.provider = null
4234 ssl.secure.random.implementation = null
4235 ssl.trustmanager.algorithm = PKIX
4236 ssl.truststore.certificates = null
4237 ssl.truststore.location = null
4238 ssl.truststore.password = null
4239 ssl.truststore.type = JKS
4240 transaction.timeout.ms = 60000
4241 transaction.two.phase.commit.enable = false
4242 transactional.id = null
4243 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4244
424512:05:15.727 [virtual-626] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
424612:05:15.728 [virtual-626] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-5] Instantiated an idempotent producer.
424712:05:15.730 [virtual-626] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
424812:05:15.730 [virtual-626] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
424912:05:15.730 [virtual-626] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327915730
425012:05:15.736 [kafka-producer-network-thread | producer-5] INFO o.a.k.c.Metadata - [Producer clientId=producer-5] Cluster ID: cERjULLDRBGv7lPJWPu8sA
425112:05:15.737 [kafka-producer-network-thread | producer-5] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-5] ProducerId set to 4 with epoch 0
425212:05:15.749 [data-plane-kafka-request-handler-4] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t2) to the active controller.
425312:05:15.750 [kafka-producer-network-thread | producer-5] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-5] The metadata response from the cluster reported a recoverable issue with correlation id 5 : {t2=UNKNOWN_TOPIC_OR_PARTITION}
425412:05:15.752 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
425512:05:15.752 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t2 with topic ID WAxm-GEoTE6RWX46UBDshg.
425612:05:15.752 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t2-0 with topic ID WAxm-GEoTE6RWX46UBDshg and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
425712:05:15.779 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
425812:05:15.779 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t2-0)
425912:05:15.779 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t2-0 with topic id WAxm-GEoTE6RWX46UBDshg.
426012:05:15.781 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t2-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
426112:05:15.782 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t2-0 in /tmp/kafka-logs4345019044203235659/t2-0 with properties {}
426212:05:15.782 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t2-0 broker=0] No checkpointed highwatermark is found for partition t2-0
426312:05:15.782 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t2-0 broker=0] Log loaded for partition t2-0 with initial high watermark 0
426412:05:15.783 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t2-0 with topic id Some(WAxm-GEoTE6RWX46UBDshg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
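The UNKNOWN_TOPIC_OR_PARTITION warning above is a benign race: the producer's first metadata request for t2 arrives before broker-side auto-creation completes, and the quorum controller reports CreateTopics SUCCESS moments later. If the warning is unwanted, topics can be created up front with the admin client; a hypothetical sketch matching the CreatableTopic above (one partition, replication factor 1):

    import java.util.Properties
    import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}

    val adminProps = new Properties()
    adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    val admin = Admin.create(adminProps)
    try admin.createTopics(java.util.List.of(new NewTopic("t2", 1, 1.toShort))).all().get()
    finally admin.close()
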
426512:05:15.960 [virtual-630] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-5] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
426612:05:15.963 [virtual-630] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
426712:05:15.963 [virtual-630] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
426812:05:15.963 [virtual-630] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
426912:05:15.963 [virtual-630] INFO o.a.k.c.m.Metrics - Metrics reporters closed
427012:05:15.963 [virtual-630] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-5 unregistered
427112:05:15.967 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
4272 allow.auto.create.topics = true
4273 auto.commit.interval.ms = 5000
4274 auto.offset.reset = earliest
4275 bootstrap.servers = [localhost:6001]
4276 check.crcs = true
4277 client.dns.lookup = use_all_dns_ips
4278 client.id = consumer-embedded-kafka-spec-2
4279 client.rack =
4280 connections.max.idle.ms = 540000
4281 default.api.timeout.ms = 60000
4282 enable.auto.commit = false
4283 enable.metrics.push = true
4284 exclude.internal.topics = true
4285 fetch.max.bytes = 52428800
4286 fetch.max.wait.ms = 500
4287 fetch.min.bytes = 1
4288 group.id = embedded-kafka-spec
4289 group.instance.id = null
4290 group.protocol = classic
4291 group.remote.assignor = null
4292 heartbeat.interval.ms = 3000
4293 interceptor.classes = []
4294 internal.leave.group.on.close = true
4295 internal.throw.on.fetch.stable.offset.unsupported = false
4296 isolation.level = read_uncommitted
4297 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4298 max.partition.fetch.bytes = 1048576
4299 max.poll.interval.ms = 300000
4300 max.poll.records = 500
4301 metadata.max.age.ms = 300000
4302 metadata.recovery.rebootstrap.trigger.ms = 300000
4303 metadata.recovery.strategy = rebootstrap
4304 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4305 metrics.num.samples = 2
4306 metrics.recording.level = INFO
4307 metrics.sample.window.ms = 30000
4308 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
4309 receive.buffer.bytes = 65536
4310 reconnect.backoff.max.ms = 1000
4311 reconnect.backoff.ms = 50
4312 request.timeout.ms = 30000
4313 retry.backoff.max.ms = 1000
4314 retry.backoff.ms = 100
4315 sasl.client.callback.handler.class = null
4316 sasl.jaas.config = null
4317 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4318 sasl.kerberos.min.time.before.relogin = 60000
4319 sasl.kerberos.service.name = null
4320 sasl.kerberos.ticket.renew.jitter = 0.05
4321 sasl.kerberos.ticket.renew.window.factor = 0.8
4322 sasl.login.callback.handler.class = null
4323 sasl.login.class = null
4324 sasl.login.connect.timeout.ms = null
4325 sasl.login.read.timeout.ms = null
4326 sasl.login.refresh.buffer.seconds = 300
4327 sasl.login.refresh.min.period.seconds = 60
4328 sasl.login.refresh.window.factor = 0.8
4329 sasl.login.refresh.window.jitter = 0.05
4330 sasl.login.retry.backoff.max.ms = 10000
4331 sasl.login.retry.backoff.ms = 100
4332 sasl.mechanism = GSSAPI
4333 sasl.oauthbearer.assertion.algorithm = RS256
4334 sasl.oauthbearer.assertion.claim.aud = null
4335 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4336 sasl.oauthbearer.assertion.claim.iss = null
4337 sasl.oauthbearer.assertion.claim.jti.include = false
4338 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4339 sasl.oauthbearer.assertion.claim.sub = null
4340 sasl.oauthbearer.assertion.file = null
4341 sasl.oauthbearer.assertion.private.key.file = null
4342 sasl.oauthbearer.assertion.private.key.passphrase = null
4343 sasl.oauthbearer.assertion.template.file = null
4344 sasl.oauthbearer.client.credentials.client.id = null
4345 sasl.oauthbearer.client.credentials.client.secret = null
4346 sasl.oauthbearer.clock.skew.seconds = 30
4347 sasl.oauthbearer.expected.audience = null
4348 sasl.oauthbearer.expected.issuer = null
4349 sasl.oauthbearer.header.urlencode = false
4350 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4351 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4352 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4353 sasl.oauthbearer.jwks.endpoint.url = null
4354 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4355 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4356 sasl.oauthbearer.scope = null
4357 sasl.oauthbearer.scope.claim.name = scope
4358 sasl.oauthbearer.sub.claim.name = sub
4359 sasl.oauthbearer.token.endpoint.url = null
4360 security.protocol = PLAINTEXT
4361 security.providers = null
4362 send.buffer.bytes = 131072
4363 session.timeout.ms = 45000
4364 share.acknowledgement.mode = implicit
4365 socket.connection.setup.timeout.max.ms = 30000
4366 socket.connection.setup.timeout.ms = 10000
4367 ssl.cipher.suites = null
4368 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4369 ssl.endpoint.identification.algorithm = https
4370 ssl.engine.factory.class = null
4371 ssl.key.password = null
4372 ssl.keymanager.algorithm = SunX509
4373 ssl.keystore.certificate.chain = null
4374 ssl.keystore.key = null
4375 ssl.keystore.location = null
4376 ssl.keystore.password = null
4377 ssl.keystore.type = JKS
4378 ssl.protocol = TLSv1.3
4379 ssl.provider = null
4380 ssl.secure.random.implementation = null
4381 ssl.trustmanager.algorithm = PKIX
4382 ssl.truststore.certificates = null
4383 ssl.truststore.location = null
4384 ssl.truststore.password = null
4385 ssl.truststore.type = JKS
4386 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4387
438812:05:15.967 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
438912:05:15.970 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
439012:05:15.970 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
439112:05:15.970 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327915970
439212:05:15.971 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Subscribed to topic(s): t2
439312:05:15.977 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Cluster ID: cERjULLDRBGv7lPJWPu8sA
439412:05:15.981 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
439512:05:15.981 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] (Re-)joining group
439612:05:15.985 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group embedded-kafka-spec in Empty state. Created a new member id consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce and requesting the member to rejoin with this id.
439712:05:15.986 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Request joining group due to: need to re-join with the given member-id: consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce
439812:05:15.986 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] (Re-)joining group
439912:05:15.987 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce joins group embedded-kafka-spec in Empty state. Adding to the group now.
440012:05:15.988 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce with group instance id null; client reason: need to re-join with the given member-id: consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce).
440112:05:18.988 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group embedded-kafka-spec generation 1 with 1 members.
440212:05:18.989 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Successfully joined group with generation Generation{generationId=1, memberId='consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce', protocol='range'}
440312:05:18.990 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Finished assignment for group at generation 1: {consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce=Assignment(partitions=[t2-0])}
440412:05:18.991 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce for group embedded-kafka-spec for generation 1. The group has 1 members, 0 of which are static.
440512:05:18.998 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Successfully synced group in generation Generation{generationId=1, memberId='consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce', protocol='range'}
440612:05:18.998 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Notifying assignor about the new Assignment(partitions=[t2-0])
440712:05:18.998 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Adding newly assigned partitions: [t2-0]
440812:05:19.000 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Found no committed offset for partition t2-0
440912:05:19.002 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Resetting offset for partition t2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
441012:05:25.395 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Revoke previously assigned partitions [t2-0]
441112:05:25.396 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Member consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
441212:05:25.396 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Resetting generation and member id due to: consumer pro-actively leaving the group
441312:05:25.396 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-2, groupId=embedded-kafka-spec] Request joining group due to: consumer pro-actively leaving the group
441412:05:25.396 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group embedded-kafka-spec] Member consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
441512:05:25.396 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-embedded-kafka-spec-2-543abb71-e8d7-484b-a0b4-c194eafebcce) members.).
441612:05:25.396 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group embedded-kafka-spec with generation 2 is now empty.
441712:05:25.404 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
441812:05:25.404 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
441912:05:25.404 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
442012:05:25.404 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
442112:05:25.406 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-embedded-kafka-spec-2 unregistered
442212:05:25.409 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4423 acks = -1
4424 batch.size = 16384
4425 bootstrap.servers = [localhost:6001]
4426 buffer.memory = 33554432
4427 client.dns.lookup = use_all_dns_ips
4428 client.id = producer-6
4429 compression.gzip.level = -1
4430 compression.lz4.level = 9
4431 compression.type = none
4432 compression.zstd.level = 3
4433 connections.max.idle.ms = 540000
4434 delivery.timeout.ms = 120000
4435 enable.idempotence = true
4436 enable.metrics.push = true
4437 interceptor.classes = []
4438 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4439 linger.ms = 5
4440 max.block.ms = 10000
4441 max.in.flight.requests.per.connection = 5
4442 max.request.size = 1048576
4443 metadata.max.age.ms = 300000
4444 metadata.max.idle.ms = 300000
4445 metadata.recovery.rebootstrap.trigger.ms = 300000
4446 metadata.recovery.strategy = rebootstrap
4447 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4448 metrics.num.samples = 2
4449 metrics.recording.level = INFO
4450 metrics.sample.window.ms = 30000
4451 partitioner.adaptive.partitioning.enable = true
4452 partitioner.availability.timeout.ms = 0
4453 partitioner.class = null
4454 partitioner.ignore.keys = false
4455 receive.buffer.bytes = 32768
4456 reconnect.backoff.max.ms = 1000
4457 reconnect.backoff.ms = 50
4458 request.timeout.ms = 30000
4459 retries = 2147483647
4460 retry.backoff.max.ms = 1000
4461 retry.backoff.ms = 1000
4462 sasl.client.callback.handler.class = null
4463 sasl.jaas.config = null
4464 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4465 sasl.kerberos.min.time.before.relogin = 60000
4466 sasl.kerberos.service.name = null
4467 sasl.kerberos.ticket.renew.jitter = 0.05
4468 sasl.kerberos.ticket.renew.window.factor = 0.8
4469 sasl.login.callback.handler.class = null
4470 sasl.login.class = null
4471 sasl.login.connect.timeout.ms = null
4472 sasl.login.read.timeout.ms = null
4473 sasl.login.refresh.buffer.seconds = 300
4474 sasl.login.refresh.min.period.seconds = 60
4475 sasl.login.refresh.window.factor = 0.8
4476 sasl.login.refresh.window.jitter = 0.05
4477 sasl.login.retry.backoff.max.ms = 10000
4478 sasl.login.retry.backoff.ms = 100
4479 sasl.mechanism = GSSAPI
4480 sasl.oauthbearer.assertion.algorithm = RS256
4481 sasl.oauthbearer.assertion.claim.aud = null
4482 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4483 sasl.oauthbearer.assertion.claim.iss = null
4484 sasl.oauthbearer.assertion.claim.jti.include = false
4485 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4486 sasl.oauthbearer.assertion.claim.sub = null
4487 sasl.oauthbearer.assertion.file = null
4488 sasl.oauthbearer.assertion.private.key.file = null
4489 sasl.oauthbearer.assertion.private.key.passphrase = null
4490 sasl.oauthbearer.assertion.template.file = null
4491 sasl.oauthbearer.client.credentials.client.id = null
4492 sasl.oauthbearer.client.credentials.client.secret = null
4493 sasl.oauthbearer.clock.skew.seconds = 30
4494 sasl.oauthbearer.expected.audience = null
4495 sasl.oauthbearer.expected.issuer = null
4496 sasl.oauthbearer.header.urlencode = false
4497 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4498 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4499 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4500 sasl.oauthbearer.jwks.endpoint.url = null
4501 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4502 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4503 sasl.oauthbearer.scope = null
4504 sasl.oauthbearer.scope.claim.name = scope
4505 sasl.oauthbearer.sub.claim.name = sub
4506 sasl.oauthbearer.token.endpoint.url = null
4507 security.protocol = PLAINTEXT
4508 security.providers = null
4509 send.buffer.bytes = 131072
4510 socket.connection.setup.timeout.max.ms = 30000
4511 socket.connection.setup.timeout.ms = 10000
4512 ssl.cipher.suites = null
4513 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4514 ssl.endpoint.identification.algorithm = https
4515 ssl.engine.factory.class = null
4516 ssl.key.password = null
4517 ssl.keymanager.algorithm = SunX509
4518 ssl.keystore.certificate.chain = null
4519 ssl.keystore.key = null
4520 ssl.keystore.location = null
4521 ssl.keystore.password = null
4522 ssl.keystore.type = JKS
4523 ssl.protocol = TLSv1.3
4524 ssl.provider = null
4525 ssl.secure.random.implementation = null
4526 ssl.trustmanager.algorithm = PKIX
4527 ssl.truststore.certificates = null
4528 ssl.truststore.location = null
4529 ssl.truststore.password = null
4530 ssl.truststore.type = JKS
4531 transaction.timeout.ms = 60000
4532 transaction.two.phase.commit.enable = false
4533 transactional.id = null
4534 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4535
453612:05:25.409 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
453712:05:25.410 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-6] Instantiated an idempotent producer.
453812:05:25.412 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
453912:05:25.412 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
454012:05:25.412 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327925412
454112:05:25.416 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t3_1) to the active controller.
454212:05:25.417 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t3_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
454312:05:25.418 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t3_1 with topic ID A987AzvLTciln99rjz3YXw.
454412:05:25.418 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t3_1-0 with topic ID A987AzvLTciln99rjz3YXw and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
454512:05:25.419 [kafka-producer-network-thread | producer-6] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-6] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t3_1=UNKNOWN_TOPIC_OR_PARTITION}
454612:05:25.419 [kafka-producer-network-thread | producer-6] INFO o.a.k.c.Metadata - [Producer clientId=producer-6] Cluster ID: cERjULLDRBGv7lPJWPu8sA
454712:05:25.420 [kafka-producer-network-thread | producer-6] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-6] ProducerId set to 5 with epoch 0
454812:05:25.443 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
454912:05:25.443 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t3_1-0)
455012:05:25.443 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t3_1-0 with topic id A987AzvLTciln99rjz3YXw.
455112:05:25.445 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t3_1-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
455212:05:25.446 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t3_1-0 in /tmp/kafka-logs4345019044203235659/t3_1-0 with properties {}
455312:05:25.446 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_1-0 broker=0] No checkpointed highwatermark is found for partition t3_1-0
455412:05:25.446 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_1-0 broker=0] Log loaded for partition t3_1-0 with initial high watermark 0
455512:05:25.446 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t3_1-0 with topic id Some(A987AzvLTciln99rjz3YXw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
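A note on the warning at 12:05:25.419: the producer's first metadata request races broker-side auto topic creation, so the cluster briefly answers UNKNOWN_TOPIC_OR_PARTITION for t3_1 before the controller's CreateTopics completes and the broker takes leadership of t3_1-0. The warning is recoverable and clears on the next metadata refresh. A sketch, assuming one wanted to avoid the race by pre-creating the topic with the AdminClient (1 partition, replication factor 1, matching the CreatableTopic above):

import java.util.Properties
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}

val adminProps = new Properties()
adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
val admin = Admin.create(adminProps)
try
  // Blocks until the controller has created the topic, so the first
  // producer metadata fetch already sees t3_1.
  admin.createTopics(java.util.List.of(new NewTopic("t3_1", 1, 1.toShort))).all().get()
finally admin.close()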
455612:05:26.430 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-6] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
455712:05:26.432 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
455812:05:26.432 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
455912:05:26.432 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
456012:05:26.432 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
456112:05:26.432 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-6 unregistered
456212:05:26.434 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4563 acks = -1
4564 batch.size = 16384
4565 bootstrap.servers = [localhost:6001]
4566 buffer.memory = 33554432
4567 client.dns.lookup = use_all_dns_ips
4568 client.id = producer-7
4569 compression.gzip.level = -1
4570 compression.lz4.level = 9
4571 compression.type = none
4572 compression.zstd.level = 3
4573 connections.max.idle.ms = 540000
4574 delivery.timeout.ms = 120000
4575 enable.idempotence = true
4576 enable.metrics.push = true
4577 interceptor.classes = []
4578 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4579 linger.ms = 5
4580 max.block.ms = 10000
4581 max.in.flight.requests.per.connection = 5
4582 max.request.size = 1048576
4583 metadata.max.age.ms = 300000
4584 metadata.max.idle.ms = 300000
4585 metadata.recovery.rebootstrap.trigger.ms = 300000
4586 metadata.recovery.strategy = rebootstrap
4587 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4588 metrics.num.samples = 2
4589 metrics.recording.level = INFO
4590 metrics.sample.window.ms = 30000
4591 partitioner.adaptive.partitioning.enable = true
4592 partitioner.availability.timeout.ms = 0
4593 partitioner.class = null
4594 partitioner.ignore.keys = false
4595 receive.buffer.bytes = 32768
4596 reconnect.backoff.max.ms = 1000
4597 reconnect.backoff.ms = 50
4598 request.timeout.ms = 30000
4599 retries = 2147483647
4600 retry.backoff.max.ms = 1000
4601 retry.backoff.ms = 1000
4602 sasl.client.callback.handler.class = null
4603 sasl.jaas.config = null
4604 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4605 sasl.kerberos.min.time.before.relogin = 60000
4606 sasl.kerberos.service.name = null
4607 sasl.kerberos.ticket.renew.jitter = 0.05
4608 sasl.kerberos.ticket.renew.window.factor = 0.8
4609 sasl.login.callback.handler.class = null
4610 sasl.login.class = null
4611 sasl.login.connect.timeout.ms = null
4612 sasl.login.read.timeout.ms = null
4613 sasl.login.refresh.buffer.seconds = 300
4614 sasl.login.refresh.min.period.seconds = 60
4615 sasl.login.refresh.window.factor = 0.8
4616 sasl.login.refresh.window.jitter = 0.05
4617 sasl.login.retry.backoff.max.ms = 10000
4618 sasl.login.retry.backoff.ms = 100
4619 sasl.mechanism = GSSAPI
4620 sasl.oauthbearer.assertion.algorithm = RS256
4621 sasl.oauthbearer.assertion.claim.aud = null
4622 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4623 sasl.oauthbearer.assertion.claim.iss = null
4624 sasl.oauthbearer.assertion.claim.jti.include = false
4625 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4626 sasl.oauthbearer.assertion.claim.sub = null
4627 sasl.oauthbearer.assertion.file = null
4628 sasl.oauthbearer.assertion.private.key.file = null
4629 sasl.oauthbearer.assertion.private.key.passphrase = null
4630 sasl.oauthbearer.assertion.template.file = null
4631 sasl.oauthbearer.client.credentials.client.id = null
4632 sasl.oauthbearer.client.credentials.client.secret = null
4633 sasl.oauthbearer.clock.skew.seconds = 30
4634 sasl.oauthbearer.expected.audience = null
4635 sasl.oauthbearer.expected.issuer = null
4636 sasl.oauthbearer.header.urlencode = false
4637 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4638 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4639 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4640 sasl.oauthbearer.jwks.endpoint.url = null
4641 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4642 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4643 sasl.oauthbearer.scope = null
4644 sasl.oauthbearer.scope.claim.name = scope
4645 sasl.oauthbearer.sub.claim.name = sub
4646 sasl.oauthbearer.token.endpoint.url = null
4647 security.protocol = PLAINTEXT
4648 security.providers = null
4649 send.buffer.bytes = 131072
4650 socket.connection.setup.timeout.max.ms = 30000
4651 socket.connection.setup.timeout.ms = 10000
4652 ssl.cipher.suites = null
4653 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4654 ssl.endpoint.identification.algorithm = https
4655 ssl.engine.factory.class = null
4656 ssl.key.password = null
4657 ssl.keymanager.algorithm = SunX509
4658 ssl.keystore.certificate.chain = null
4659 ssl.keystore.key = null
4660 ssl.keystore.location = null
4661 ssl.keystore.password = null
4662 ssl.keystore.type = JKS
4663 ssl.protocol = TLSv1.3
4664 ssl.provider = null
4665 ssl.secure.random.implementation = null
4666 ssl.trustmanager.algorithm = PKIX
4667 ssl.truststore.certificates = null
4668 ssl.truststore.location = null
4669 ssl.truststore.password = null
4670 ssl.truststore.type = JKS
4671 transaction.timeout.ms = 60000
4672 transaction.two.phase.commit.enable = false
4673 transactional.id = null
4674 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4675
467612:05:26.434 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
467712:05:26.434 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-7] Instantiated an idempotent producer.
467812:05:26.436 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
467912:05:26.436 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
468012:05:26.436 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327926436
468112:05:26.439 [kafka-producer-network-thread | producer-7] INFO o.a.k.c.Metadata - [Producer clientId=producer-7] Cluster ID: cERjULLDRBGv7lPJWPu8sA
468212:05:26.440 [kafka-producer-network-thread | producer-7] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-7] ProducerId set to 6 with epoch 0
468312:05:26.449 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-7] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
468412:05:26.451 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
468512:05:26.451 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
468612:05:26.451 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
468712:05:26.451 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
468812:05:26.451 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-7 unregistered
468912:05:26.452 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
4690 acks = -1
4691 batch.size = 16384
4692 bootstrap.servers = [localhost:6001]
4693 buffer.memory = 33554432
4694 client.dns.lookup = use_all_dns_ips
4695 client.id = producer-8
4696 compression.gzip.level = -1
4697 compression.lz4.level = 9
4698 compression.type = none
4699 compression.zstd.level = 3
4700 connections.max.idle.ms = 540000
4701 delivery.timeout.ms = 120000
4702 enable.idempotence = true
4703 enable.metrics.push = true
4704 interceptor.classes = []
4705 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
4706 linger.ms = 5
4707 max.block.ms = 10000
4708 max.in.flight.requests.per.connection = 5
4709 max.request.size = 1048576
4710 metadata.max.age.ms = 300000
4711 metadata.max.idle.ms = 300000
4712 metadata.recovery.rebootstrap.trigger.ms = 300000
4713 metadata.recovery.strategy = rebootstrap
4714 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4715 metrics.num.samples = 2
4716 metrics.recording.level = INFO
4717 metrics.sample.window.ms = 30000
4718 partitioner.adaptive.partitioning.enable = true
4719 partitioner.availability.timeout.ms = 0
4720 partitioner.class = null
4721 partitioner.ignore.keys = false
4722 receive.buffer.bytes = 32768
4723 reconnect.backoff.max.ms = 1000
4724 reconnect.backoff.ms = 50
4725 request.timeout.ms = 30000
4726 retries = 2147483647
4727 retry.backoff.max.ms = 1000
4728 retry.backoff.ms = 1000
4729 sasl.client.callback.handler.class = null
4730 sasl.jaas.config = null
4731 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4732 sasl.kerberos.min.time.before.relogin = 60000
4733 sasl.kerberos.service.name = null
4734 sasl.kerberos.ticket.renew.jitter = 0.05
4735 sasl.kerberos.ticket.renew.window.factor = 0.8
4736 sasl.login.callback.handler.class = null
4737 sasl.login.class = null
4738 sasl.login.connect.timeout.ms = null
4739 sasl.login.read.timeout.ms = null
4740 sasl.login.refresh.buffer.seconds = 300
4741 sasl.login.refresh.min.period.seconds = 60
4742 sasl.login.refresh.window.factor = 0.8
4743 sasl.login.refresh.window.jitter = 0.05
4744 sasl.login.retry.backoff.max.ms = 10000
4745 sasl.login.retry.backoff.ms = 100
4746 sasl.mechanism = GSSAPI
4747 sasl.oauthbearer.assertion.algorithm = RS256
4748 sasl.oauthbearer.assertion.claim.aud = null
4749 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4750 sasl.oauthbearer.assertion.claim.iss = null
4751 sasl.oauthbearer.assertion.claim.jti.include = false
4752 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4753 sasl.oauthbearer.assertion.claim.sub = null
4754 sasl.oauthbearer.assertion.file = null
4755 sasl.oauthbearer.assertion.private.key.file = null
4756 sasl.oauthbearer.assertion.private.key.passphrase = null
4757 sasl.oauthbearer.assertion.template.file = null
4758 sasl.oauthbearer.client.credentials.client.id = null
4759 sasl.oauthbearer.client.credentials.client.secret = null
4760 sasl.oauthbearer.clock.skew.seconds = 30
4761 sasl.oauthbearer.expected.audience = null
4762 sasl.oauthbearer.expected.issuer = null
4763 sasl.oauthbearer.header.urlencode = false
4764 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4765 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4766 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4767 sasl.oauthbearer.jwks.endpoint.url = null
4768 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4769 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4770 sasl.oauthbearer.scope = null
4771 sasl.oauthbearer.scope.claim.name = scope
4772 sasl.oauthbearer.sub.claim.name = sub
4773 sasl.oauthbearer.token.endpoint.url = null
4774 security.protocol = PLAINTEXT
4775 security.providers = null
4776 send.buffer.bytes = 131072
4777 socket.connection.setup.timeout.max.ms = 30000
4778 socket.connection.setup.timeout.ms = 10000
4779 ssl.cipher.suites = null
4780 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4781 ssl.endpoint.identification.algorithm = https
4782 ssl.engine.factory.class = null
4783 ssl.key.password = null
4784 ssl.keymanager.algorithm = SunX509
4785 ssl.keystore.certificate.chain = null
4786 ssl.keystore.key = null
4787 ssl.keystore.location = null
4788 ssl.keystore.password = null
4789 ssl.keystore.type = JKS
4790 ssl.protocol = TLSv1.3
4791 ssl.provider = null
4792 ssl.secure.random.implementation = null
4793 ssl.trustmanager.algorithm = PKIX
4794 ssl.truststore.certificates = null
4795 ssl.truststore.location = null
4796 ssl.truststore.password = null
4797 ssl.truststore.type = JKS
4798 transaction.timeout.ms = 60000
4799 transaction.two.phase.commit.enable = false
4800 transactional.id = null
4801 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
4802
480312:05:26.452 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
480412:05:26.452 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-8] Instantiated an idempotent producer.
480512:05:26.454 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
480612:05:26.454 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
480712:05:26.454 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327926454
480812:05:26.456 [kafka-producer-network-thread | producer-8] INFO o.a.k.c.Metadata - [Producer clientId=producer-8] Cluster ID: cERjULLDRBGv7lPJWPu8sA
480912:05:26.457 [kafka-producer-network-thread | producer-8] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-8] ProducerId set to 7 with epoch 0
481012:05:26.465 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-8] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
481112:05:26.466 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
481212:05:26.466 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
481312:05:26.466 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
481412:05:26.466 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
481512:05:26.467 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-8 unregistered
481612:05:26.468 [virtual-635] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
4817 allow.auto.create.topics = true
4818 auto.commit.interval.ms = 5000
4819 auto.offset.reset = earliest
4820 bootstrap.servers = [localhost:6001]
4821 check.crcs = true
4822 client.dns.lookup = use_all_dns_ips
4823 client.id = consumer-g3_1-3
4824 client.rack =
4825 connections.max.idle.ms = 540000
4826 default.api.timeout.ms = 60000
4827 enable.auto.commit = false
4828 enable.metrics.push = true
4829 exclude.internal.topics = true
4830 fetch.max.bytes = 52428800
4831 fetch.max.wait.ms = 500
4832 fetch.min.bytes = 1
4833 group.id = g3_1
4834 group.instance.id = null
4835 group.protocol = classic
4836 group.remote.assignor = null
4837 heartbeat.interval.ms = 3000
4838 interceptor.classes = []
4839 internal.leave.group.on.close = true
4840 internal.throw.on.fetch.stable.offset.unsupported = false
4841 isolation.level = read_uncommitted
4842 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4843 max.partition.fetch.bytes = 1048576
4844 max.poll.interval.ms = 300000
4845 max.poll.records = 500
4846 metadata.max.age.ms = 300000
4847 metadata.recovery.rebootstrap.trigger.ms = 300000
4848 metadata.recovery.strategy = rebootstrap
4849 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4850 metrics.num.samples = 2
4851 metrics.recording.level = INFO
4852 metrics.sample.window.ms = 30000
4853 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
4854 receive.buffer.bytes = 65536
4855 reconnect.backoff.max.ms = 1000
4856 reconnect.backoff.ms = 50
4857 request.timeout.ms = 30000
4858 retry.backoff.max.ms = 1000
4859 retry.backoff.ms = 100
4860 sasl.client.callback.handler.class = null
4861 sasl.jaas.config = null
4862 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4863 sasl.kerberos.min.time.before.relogin = 60000
4864 sasl.kerberos.service.name = null
4865 sasl.kerberos.ticket.renew.jitter = 0.05
4866 sasl.kerberos.ticket.renew.window.factor = 0.8
4867 sasl.login.callback.handler.class = null
4868 sasl.login.class = null
4869 sasl.login.connect.timeout.ms = null
4870 sasl.login.read.timeout.ms = null
4871 sasl.login.refresh.buffer.seconds = 300
4872 sasl.login.refresh.min.period.seconds = 60
4873 sasl.login.refresh.window.factor = 0.8
4874 sasl.login.refresh.window.jitter = 0.05
4875 sasl.login.retry.backoff.max.ms = 10000
4876 sasl.login.retry.backoff.ms = 100
4877 sasl.mechanism = GSSAPI
4878 sasl.oauthbearer.assertion.algorithm = RS256
4879 sasl.oauthbearer.assertion.claim.aud = null
4880 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4881 sasl.oauthbearer.assertion.claim.iss = null
4882 sasl.oauthbearer.assertion.claim.jti.include = false
4883 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
4884 sasl.oauthbearer.assertion.claim.sub = null
4885 sasl.oauthbearer.assertion.file = null
4886 sasl.oauthbearer.assertion.private.key.file = null
4887 sasl.oauthbearer.assertion.private.key.passphrase = null
4888 sasl.oauthbearer.assertion.template.file = null
4889 sasl.oauthbearer.client.credentials.client.id = null
4890 sasl.oauthbearer.client.credentials.client.secret = null
4891 sasl.oauthbearer.clock.skew.seconds = 30
4892 sasl.oauthbearer.expected.audience = null
4893 sasl.oauthbearer.expected.issuer = null
4894 sasl.oauthbearer.header.urlencode = false
4895 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
4896 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
4897 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
4898 sasl.oauthbearer.jwks.endpoint.url = null
4899 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
4900 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
4901 sasl.oauthbearer.scope = null
4902 sasl.oauthbearer.scope.claim.name = scope
4903 sasl.oauthbearer.sub.claim.name = sub
4904 sasl.oauthbearer.token.endpoint.url = null
4905 security.protocol = PLAINTEXT
4906 security.providers = null
4907 send.buffer.bytes = 131072
4908 session.timeout.ms = 45000
4909 share.acknowledgement.mode = implicit
4910 socket.connection.setup.timeout.max.ms = 30000
4911 socket.connection.setup.timeout.ms = 10000
4912 ssl.cipher.suites = null
4913 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
4914 ssl.endpoint.identification.algorithm = https
4915 ssl.engine.factory.class = null
4916 ssl.key.password = null
4917 ssl.keymanager.algorithm = SunX509
4918 ssl.keystore.certificate.chain = null
4919 ssl.keystore.key = null
4920 ssl.keystore.location = null
4921 ssl.keystore.password = null
4922 ssl.keystore.type = JKS
4923 ssl.protocol = TLSv1.3
4924 ssl.provider = null
4925 ssl.secure.random.implementation = null
4926 ssl.trustmanager.algorithm = PKIX
4927 ssl.truststore.certificates = null
4928 ssl.truststore.location = null
4929 ssl.truststore.password = null
4930 ssl.truststore.type = JKS
4931 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4932
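The dump above is the consumer-side counterpart: group g3_1, auto.offset.reset=earliest, enable.auto.commit=false (the test commits offsets itself), String deserializers, and the classic group protocol. A minimal sketch of an equivalent consumer, again with the plain client and a hypothetical helper name:

import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer

// Hypothetical helper mirroring the ConsumerConfig values dumped above.
def stringConsumer(group: String): KafkaConsumer[String, String] =
  val props = new Properties()
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  props.put(ConsumerConfig.GROUP_ID_CONFIG, group) // "g3_1" in the run above
  props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
  props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
  new KafkaConsumer[String, String](props)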
493312:05:26.468 [virtual-635] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
493412:05:26.468 [virtual-637] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
4935 allow.auto.create.topics = true
4936 auto.commit.interval.ms = 5000
4937 auto.offset.reset = earliest
4938 bootstrap.servers = [localhost:6001]
4939 check.crcs = true
4940 client.dns.lookup = use_all_dns_ips
4941 client.id = consumer-g3_1-4
4942 client.rack =
4943 connections.max.idle.ms = 540000
4944 default.api.timeout.ms = 60000
4945 enable.auto.commit = false
4946 enable.metrics.push = true
4947 exclude.internal.topics = true
4948 fetch.max.bytes = 52428800
4949 fetch.max.wait.ms = 500
4950 fetch.min.bytes = 1
4951 group.id = g3_1
4952 group.instance.id = null
4953 group.protocol = classic
4954 group.remote.assignor = null
4955 heartbeat.interval.ms = 3000
4956 interceptor.classes = []
4957 internal.leave.group.on.close = true
4958 internal.throw.on.fetch.stable.offset.unsupported = false
4959 isolation.level = read_uncommitted
4960 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
4961 max.partition.fetch.bytes = 1048576
4962 max.poll.interval.ms = 300000
4963 max.poll.records = 500
4964 metadata.max.age.ms = 300000
4965 metadata.recovery.rebootstrap.trigger.ms = 300000
4966 metadata.recovery.strategy = rebootstrap
4967 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
4968 metrics.num.samples = 2
4969 metrics.recording.level = INFO
4970 metrics.sample.window.ms = 30000
4971 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
4972 receive.buffer.bytes = 65536
4973 reconnect.backoff.max.ms = 1000
4974 reconnect.backoff.ms = 50
4975 request.timeout.ms = 30000
4976 retry.backoff.max.ms = 1000
4977 retry.backoff.ms = 100
4978 sasl.client.callback.handler.class = null
4979 sasl.jaas.config = null
4980 sasl.kerberos.kinit.cmd = /usr/bin/kinit
4981 sasl.kerberos.min.time.before.relogin = 60000
4982 sasl.kerberos.service.name = null
4983 sasl.kerberos.ticket.renew.jitter = 0.05
4984 sasl.kerberos.ticket.renew.window.factor = 0.8
4985 sasl.login.callback.handler.class = null
4986 sasl.login.class = null
4987 sasl.login.connect.timeout.ms = null
4988 sasl.login.read.timeout.ms = null
4989 sasl.login.refresh.buffer.seconds = 300
4990 sasl.login.refresh.min.period.seconds = 60
4991 sasl.login.refresh.window.factor = 0.8
4992 sasl.login.refresh.window.jitter = 0.05
4993 sasl.login.retry.backoff.max.ms = 10000
4994 sasl.login.retry.backoff.ms = 100
4995 sasl.mechanism = GSSAPI
4996 sasl.oauthbearer.assertion.algorithm = RS256
4997 sasl.oauthbearer.assertion.claim.aud = null
4998 sasl.oauthbearer.assertion.claim.exp.seconds = 300
4999 sasl.oauthbearer.assertion.claim.iss = null
5000 sasl.oauthbearer.assertion.claim.jti.include = false
5001 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5002 sasl.oauthbearer.assertion.claim.sub = null
5003 sasl.oauthbearer.assertion.file = null
5004 sasl.oauthbearer.assertion.private.key.file = null
5005 sasl.oauthbearer.assertion.private.key.passphrase = null
5006 sasl.oauthbearer.assertion.template.file = null
5007 sasl.oauthbearer.client.credentials.client.id = null
5008 sasl.oauthbearer.client.credentials.client.secret = null
5009 sasl.oauthbearer.clock.skew.seconds = 30
5010 sasl.oauthbearer.expected.audience = null
5011 sasl.oauthbearer.expected.issuer = null
5012 sasl.oauthbearer.header.urlencode = false
5013 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5014 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5015 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5016 sasl.oauthbearer.jwks.endpoint.url = null
5017 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5018 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5019 sasl.oauthbearer.scope = null
5020 sasl.oauthbearer.scope.claim.name = scope
5021 sasl.oauthbearer.sub.claim.name = sub
5022 sasl.oauthbearer.token.endpoint.url = null
5023 security.protocol = PLAINTEXT
5024 security.providers = null
5025 send.buffer.bytes = 131072
5026 session.timeout.ms = 45000
5027 share.acknowledgement.mode = implicit
5028 socket.connection.setup.timeout.max.ms = 30000
5029 socket.connection.setup.timeout.ms = 10000
5030 ssl.cipher.suites = null
5031 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5032 ssl.endpoint.identification.algorithm = https
5033 ssl.engine.factory.class = null
5034 ssl.key.password = null
5035 ssl.keymanager.algorithm = SunX509
5036 ssl.keystore.certificate.chain = null
5037 ssl.keystore.key = null
5038 ssl.keystore.location = null
5039 ssl.keystore.password = null
5040 ssl.keystore.type = JKS
5041 ssl.protocol = TLSv1.3
5042 ssl.provider = null
5043 ssl.secure.random.implementation = null
5044 ssl.trustmanager.algorithm = PKIX
5045 ssl.truststore.certificates = null
5046 ssl.truststore.location = null
5047 ssl.truststore.password = null
5048 ssl.truststore.type = JKS
5049 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
5050
505112:05:26.468 [virtual-637] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
505212:05:26.471 [virtual-637] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
505312:05:26.471 [virtual-637] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
505412:05:26.471 [virtual-637] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327926471
505512:05:26.472 [virtual-635] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
505612:05:26.472 [virtual-635] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
505712:05:26.473 [virtual-635] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327926472
505812:05:26.473 [virtual-637] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
5059 acks = -1
5060 batch.size = 16384
5061 bootstrap.servers = [localhost:6001]
5062 buffer.memory = 33554432
5063 client.dns.lookup = use_all_dns_ips
5064 client.id = producer-9
5065 compression.gzip.level = -1
5066 compression.lz4.level = 9
5067 compression.type = none
5068 compression.zstd.level = 3
5069 connections.max.idle.ms = 540000
5070 delivery.timeout.ms = 120000
5071 enable.idempotence = true
5072 enable.metrics.push = true
5073 interceptor.classes = []
5074 key.serializer = class org.apache.kafka.common.serialization.StringSerializer
5075 linger.ms = 5
5076 max.block.ms = 60000
5077 max.in.flight.requests.per.connection = 5
5078 max.request.size = 1048576
5079 metadata.max.age.ms = 300000
5080 metadata.max.idle.ms = 300000
5081 metadata.recovery.rebootstrap.trigger.ms = 300000
5082 metadata.recovery.strategy = rebootstrap
5083 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
5084 metrics.num.samples = 2
5085 metrics.recording.level = INFO
5086 metrics.sample.window.ms = 30000
5087 partitioner.adaptive.partitioning.enable = true
5088 partitioner.availability.timeout.ms = 0
5089 partitioner.class = null
5090 partitioner.ignore.keys = false
5091 receive.buffer.bytes = 32768
5092 reconnect.backoff.max.ms = 1000
5093 reconnect.backoff.ms = 50
5094 request.timeout.ms = 30000
5095 retries = 2147483647
5096 retry.backoff.max.ms = 1000
5097 retry.backoff.ms = 100
5098 sasl.client.callback.handler.class = null
5099 sasl.jaas.config = null
5100 sasl.kerberos.kinit.cmd = /usr/bin/kinit
5101 sasl.kerberos.min.time.before.relogin = 60000
5102 sasl.kerberos.service.name = null
5103 sasl.kerberos.ticket.renew.jitter = 0.05
5104 sasl.kerberos.ticket.renew.window.factor = 0.8
5105 sasl.login.callback.handler.class = null
5106 sasl.login.class = null
5107 sasl.login.connect.timeout.ms = null
5108 sasl.login.read.timeout.ms = null
5109 sasl.login.refresh.buffer.seconds = 300
5110 sasl.login.refresh.min.period.seconds = 60
5111 sasl.login.refresh.window.factor = 0.8
5112 sasl.login.refresh.window.jitter = 0.05
5113 sasl.login.retry.backoff.max.ms = 10000
5114 sasl.login.retry.backoff.ms = 100
5115 sasl.mechanism = GSSAPI
5116 sasl.oauthbearer.assertion.algorithm = RS256
5117 sasl.oauthbearer.assertion.claim.aud = null
5118 sasl.oauthbearer.assertion.claim.exp.seconds = 300
5119 sasl.oauthbearer.assertion.claim.iss = null
5120 sasl.oauthbearer.assertion.claim.jti.include = false
5121 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
5122 sasl.oauthbearer.assertion.claim.sub = null
5123 sasl.oauthbearer.assertion.file = null
5124 sasl.oauthbearer.assertion.private.key.file = null
5125 sasl.oauthbearer.assertion.private.key.passphrase = null
5126 sasl.oauthbearer.assertion.template.file = null
5127 sasl.oauthbearer.client.credentials.client.id = null
5128 sasl.oauthbearer.client.credentials.client.secret = null
5129 sasl.oauthbearer.clock.skew.seconds = 30
5130 sasl.oauthbearer.expected.audience = null
5131 sasl.oauthbearer.expected.issuer = null
5132 sasl.oauthbearer.header.urlencode = false
5133 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
5134 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
5135 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
5136 sasl.oauthbearer.jwks.endpoint.url = null
5137 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
5138 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
5139 sasl.oauthbearer.scope = null
5140 sasl.oauthbearer.scope.claim.name = scope
5141 sasl.oauthbearer.sub.claim.name = sub
5142 sasl.oauthbearer.token.endpoint.url = null
5143 security.protocol = PLAINTEXT
5144 security.providers = null
5145 send.buffer.bytes = 131072
5146 socket.connection.setup.timeout.max.ms = 30000
5147 socket.connection.setup.timeout.ms = 10000
5148 ssl.cipher.suites = null
5149 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
5150 ssl.endpoint.identification.algorithm = https
5151 ssl.engine.factory.class = null
5152 ssl.key.password = null
5153 ssl.keymanager.algorithm = SunX509
5154 ssl.keystore.certificate.chain = null
5155 ssl.keystore.key = null
5156 ssl.keystore.location = null
5157 ssl.keystore.password = null
5158 ssl.keystore.type = JKS
5159 ssl.protocol = TLSv1.3
5160 ssl.provider = null
5161 ssl.secure.random.implementation = null
5162 ssl.trustmanager.algorithm = PKIX
5163 ssl.truststore.certificates = null
5164 ssl.truststore.location = null
5165 ssl.truststore.password = null
5166 ssl.truststore.type = JKS
5167 transaction.timeout.ms = 60000
5168 transaction.two.phase.commit.enable = false
5169 transactional.id = null
5170 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
5171
517212:05:26.474 [virtual-637] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
517312:05:26.474 [virtual-641] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Subscribed to topic(s): t3_2
517412:05:26.474 [virtual-637] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-9] Instantiated an idempotent producer.
517512:05:26.478 [virtual-637] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
517612:05:26.478 [virtual-637] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
517712:05:26.478 [virtual-637] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327926478
517812:05:26.479 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t3_2) to the active controller.
517912:05:26.479 [virtual-638] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Subscribed to topic(s): t3_1
518012:05:26.479 [virtual-641] WARN o.a.k.c.NetworkClient - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] The metadata response from the cluster reported a recoverable issue with correlation id 2 : {t3_2=UNKNOWN_TOPIC_OR_PARTITION}
518112:05:26.480 [virtual-641] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
518212:05:26.480 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
518312:05:26.482 [kafka-producer-network-thread | producer-9] INFO o.a.k.c.Metadata - [Producer clientId=producer-9] Cluster ID: cERjULLDRBGv7lPJWPu8sA
518412:05:26.482 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t3_2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
518512:05:26.484 [kafka-producer-network-thread | producer-9] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-9] ProducerId set to 8 with epoch 0
518612:05:26.484 [virtual-638] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
518712:05:26.485 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
518812:05:26.485 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t3_2 with topic ID CJasYBHwQVKVwKY2ZWzDhw.
518912:05:26.486 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t3_2-0 with topic ID CJasYBHwQVKVwKY2ZWzDhw and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
519012:05:26.486 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] (Re-)joining group
519112:05:26.487 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] (Re-)joining group
519212:05:26.489 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in Empty state. Created a new member id consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7 and requesting the member to rejoin with this id.
519312:05:26.490 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7
519412:05:26.490 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] (Re-)joining group
519512:05:26.491 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in Empty state. Created a new member id consumer-g3_1-4-b4b11cfd-60d4-440e-838c-6344d6d2d68d and requesting the member to rejoin with this id.
519612:05:26.492 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-4-b4b11cfd-60d4-440e-838c-6344d6d2d68d
519712:05:26.492 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] (Re-)joining group
519812:05:26.492 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7 joins group g3_1 in Empty state. Adding to the group now.
519912:05:26.492 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7 with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7).
520012:05:26.494 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-4-b4b11cfd-60d4-440e-838c-6344d6d2d68d joins group g3_1 in PreparingRebalance state. Adding to the group now.
520112:05:26.512 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
520212:05:26.512 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t3_2-0)
520312:05:26.512 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t3_2-0 with topic id CJasYBHwQVKVwKY2ZWzDhw.
520412:05:26.514 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t3_2-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
520512:05:26.515 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t3_2-0 in /tmp/kafka-logs4345019044203235659/t3_2-0 with properties {}
520612:05:26.515 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_2-0 broker=0] No checkpointed highwatermark is found for partition t3_2-0
520712:05:26.515 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t3_2-0 broker=0] Log loaded for partition t3_2-0 with initial high watermark 0
520812:05:26.515 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t3_2-0 with topic id Some(CJasYBHwQVKVwKY2ZWzDhw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
520912:05:32.493 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_1 generation 1 with 2 members.
521012:05:32.493 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7', protocol='range'}
521112:05:32.493 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_1-4-b4b11cfd-60d4-440e-838c-6344d6d2d68d', protocol='range'}
521212:05:32.495 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Finished assignment for group at generation 1: {consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7=Assignment(partitions=[t3_2-0]), consumer-g3_1-4-b4b11cfd-60d4-440e-838c-6344d6d2d68d=Assignment(partitions=[t3_1-0])}
521312:05:32.495 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7 for group g3_1 for generation 1. The group has 2 members, 0 of which are static.
521412:05:32.502 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7', protocol='range'}
521512:05:32.502 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_1-4-b4b11cfd-60d4-440e-838c-6344d6d2d68d', protocol='range'}
521612:05:32.502 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_2-0])
521712:05:32.502 [virtual-641] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Adding newly assigned partitions: [t3_2-0]
521812:05:32.502 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_1-0])
521912:05:32.503 [virtual-638] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Adding newly assigned partitions: [t3_1-0]
522012:05:32.504 [virtual-638] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Found no committed offset for partition t3_1-0
522112:05:32.504 [virtual-641] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Found no committed offset for partition t3_2-0
522212:05:32.505 [virtual-641] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Resetting offset for partition t3_2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
522312:05:32.506 [virtual-638] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Resetting offset for partition t3_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
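What follows the two config dumps is a textbook classic-protocol rebalance: both members first join with unknown member ids, are told to rejoin with coordinator-assigned ids, and the group stabilizes only at 12:05:32.493, once the coordinator's join window closes six seconds later. consumer-g3_1-3, as group leader, then computes the range assignment (one partition each, since the two members subscribe to different single-partition topics), both members sync, the rebalance listeners log the newly assigned partitions, and with no committed offsets both positions reset to 0 per auto.offset.reset=earliest. A sketch of hooking into exactly those callbacks with the plain client API (the println bodies are placeholders):

import java.util.Collection as JCollection
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
import org.apache.kafka.common.TopicPartition

val listener = new ConsumerRebalanceListener:
  def onPartitionsAssigned(parts: JCollection[TopicPartition]): Unit =
    println(s"assigned: $parts") // the "Adding newly assigned partitions" step above
  def onPartitionsRevoked(parts: JCollection[TopicPartition]): Unit =
    println(s"revoked: $parts") // the "Revoke previously assigned partitions" step below

// Usage (hypothetical): consumer.subscribe(java.util.List.of("t3_2"), listener)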
522412:05:34.520 [virtual-640] ERROR o.k.KafkaFlow$ - Exception when polling for records
5225java.lang.InterruptedException: null
5226 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
5227 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
5228 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
5229 at ox.channels.ActorRef.ask(actor.scala:64)
5230 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
5231 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
5232 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5233 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5234 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
5235 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
5236 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
5237 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5238 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5239 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
524012:05:34.520 [virtual-638] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
5241java.lang.InterruptedException: null
5242 ... 18 common frames omitted
5243Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
5244 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
5245 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
5246 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
5247 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
5248 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
5249 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
5250 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
5251 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
5252 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
5253 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
5254 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5255 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5256 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
5257 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
5258 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
5259 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5260 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5261 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
526212:05:34.520 [virtual-644] ERROR o.k.KafkaFlow$ - Exception when polling for records
5263java.lang.InterruptedException: null
5264 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
5265 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
5266 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
5267 at ox.channels.ActorRef.ask(actor.scala:64)
5268 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
5269 at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
5270 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5271 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5272 at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
5273 at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
5274 at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
5275 at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
5276 at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
5277 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5278 at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
5279 at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
5280 at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
5281 at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
5282 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5283 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5284 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
528512:05:34.520 [virtual-641] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
5286java.lang.InterruptedException: null
5287 ... 18 common frames omitted
5288Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
5289 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
5290 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
5291 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
5292 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
5293 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
5294 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
5295 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
5296 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
5297 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
5298 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
5299 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
5300 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
5301 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
5302 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
5303 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
5304 at scala.Function0.apply$mcV$sp(Function0.scala:45)
5305 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
5306 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
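The four ERROR entries above are shutdown noise rather than Kafka failures: when the test's supervised scope ends, ox interrupts the virtual threads it forked, including the actor threads blocked inside KafkaConsumer.poll; the client rethrows the InterruptedException as org.apache.kafka.common.errors.InterruptException, KafkaFlow and KafkaConsumerWrapper log it, and the consumers then leave the group cleanly (the LeaveGroup lines that follow). A minimal sketch reproducing just that mechanism with the plain client on a virtual thread; this mirrors the interrupt behaviour but is not ox's actual shutdown path, and the consumer is left unclosed for brevity:

import java.time.Duration
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.errors.InterruptException

@main def pollInterruptDemo(): Unit =
  val props = new Properties()
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo")
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
    "org.apache.kafka.common.serialization.StringDeserializer")
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
    "org.apache.kafka.common.serialization.StringDeserializer")
  val poller = Thread.ofVirtual().start { () =>
    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(java.util.List.of("t3_1"))
    try consumer.poll(Duration.ofSeconds(30)) // blocks; the test's pollers sit here
    catch
      case e: InterruptException =>
        // The same wrapped InterruptedException that is logged above at 12:05:34.520.
        println(s"poll interrupted: ${e.getCause}")
  }
  Thread.sleep(1000)
  poller.interrupt() // what ox does to its forks when the enclosing scope shuts down
  poller.join()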
530712:05:34.521 [virtual-652] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-9] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
530812:05:34.521 [virtual-653] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Revoke previously assigned partitions [t3_2-0]
530912:05:34.522 [virtual-653] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Member consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
531012:05:34.522 [virtual-653] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
531112:05:34.522 [virtual-653] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-3, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
531212:05:34.523 [virtual-652] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
531312:05:34.523 [virtual-652] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
531412:05:34.523 [virtual-652] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
531512:05:34.523 [virtual-652] INFO o.a.k.c.m.Metrics - Metrics reporters closed
531612:05:34.524 [virtual-652] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-9 unregistered
531712:05:34.524 [virtual-651] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Revoke previously assigned partitions [t3_1-0]
531812:05:34.524 [virtual-651] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Member consumer-g3_1-4-b4b11cfd-60d4-440e-838c-6344d6d2d68d sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
531912:05:34.524 [virtual-651] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
532012:05:34.525 [virtual-651] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-4, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
532112:05:34.525 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
532212:05:34.525 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g3_1-3-58d338e6-ae12-463e-a851-1362d7357ff7) members.).
532312:05:34.526 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-4-b4b11cfd-60d4-440e-838c-6344d6d2d68d has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
532412:05:34.526 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_1 with generation 2 is now empty.
532512:05:34.527 [virtual-653] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
532612:05:34.527 [virtual-653] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
532712:05:34.528 [virtual-653] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
532812:05:34.528 [virtual-653] INFO o.a.k.c.m.Metrics - Metrics reporters closed
532912:05:34.529 [virtual-653] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-3 unregistered
533012:05:34.533 [virtual-651] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
533112:05:34.533 [virtual-651] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
533212:05:34.533 [virtual-651] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
533312:05:34.533 [virtual-651] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:34.535 [virtual-651] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-4 unregistered
12:05:34.536 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-10
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

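The dump above is the full ProducerConfig for producer-10, created by the KafkaTest suite against the embedded broker on localhost:6001; everything is at its Kafka 4.1.1 default except the test-specific values (bootstrap.servers, String key/value serializers, linger.ms = 5). For orientation, a minimal sketch of how such a producer is typically driven through ox-kafka: this is not the test's own code, the API shape (ProducerSettings.default, bootstrapServers, KafkaDrain.runPublish, Flow.pipe) is assumed from the ox documentation and may differ between versions, and the topic name "t" is made up.

    import ox.flow.Flow
    import ox.kafka.*
    import org.apache.kafka.clients.producer.ProducerRecord

    // Mirrors the dump: embedded broker on localhost:6001, String serde
    val producerSettings = ProducerSettings.default.bootstrapServers("localhost:6001")

    // Publishing a flow of records creates (and, once the flow completes,
    // closes) a KafkaProducer - the "Instantiated an idempotent producer" /
    // "Closing the Kafka producer" lines below
    Flow
      .fromIterable(List("a", "b", "c"))
      .map(msg => ProducerRecord[String, String]("t", msg))
      .pipe(KafkaDrain.runPublish(producerSettings))
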
12:05:34.536 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:34.536 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-10] Instantiated an idempotent producer.
12:05:34.538 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:34.538 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:34.538 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327934538
12:05:34.541 [kafka-producer-network-thread | producer-10] INFO o.a.k.c.Metadata - [Producer clientId=producer-10] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:34.541 [kafka-producer-network-thread | producer-10] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-10] ProducerId set to 9 with epoch 0
12:05:34.549 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-10] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:05:34.551 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:34.551 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:34.551 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:34.551 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:34.551 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-10 unregistered
12:05:34.553 [virtual-655] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g3_1-5
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g3_1
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

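This ConsumerConfig belongs to consumer-g3_1-5 in group g3_1; note enable.auto.commit = false and auto.offset.reset = earliest, consistent with ox-kafka committing offsets explicitly rather than relying on the client's auto-commit timer. A sketch of the subscription that produces the "Subscribed to topic(s): t3_1" line below, assuming the ox.kafka API named in the stack traces further down (ConsumerSettings.default and KafkaFlow.subscribe follow the ox docs; the accessor on the received message is an assumption):

    import ox.*
    import ox.kafka.*
    import ox.kafka.ConsumerSettings.AutoOffsetReset

    // Mirrors the dump: group g3_1, embedded broker, earliest offset reset
    val consumerSettings = ConsumerSettings
      .default("g3_1")
      .bootstrapServers("localhost:6001")
      .autoOffsetReset(AutoOffsetReset.Earliest)

    supervised {
      // subscribe() runs the KafkaConsumer on a dedicated fork (an ox actor),
      // which is why these log lines interleave several virtual-thread names
      KafkaFlow
        .subscribe(consumerSettings, "t3_1")
        .take(3)
        .runForeach(msg => println(msg.value))
    }
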
12:05:34.554 [virtual-655] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:34.556 [virtual-655] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:34.556 [virtual-655] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:34.557 [virtual-655] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327934556
12:05:34.557 [virtual-658] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Subscribed to topic(s): t3_1
12:05:34.560 [virtual-658] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:34.561 [virtual-658] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:05:34.562 [virtual-658] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] (Re-)joining group
12:05:34.563 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_1 in Empty state. Created a new member id consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016 and requesting the member to rejoin with this id.
12:05:34.564 [virtual-658] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Request joining group due to: need to re-join with the given member-id: consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016
12:05:34.564 [virtual-658] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] (Re-)joining group
12:05:34.564 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016 joins group g3_1 in Empty state. Adding to the group now.
12:05:34.564 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016 with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016).
12:05:37.565 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_1 generation 3 with 1 members.
12:05:37.566 [virtual-658] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016', protocol='range'}
12:05:37.566 [virtual-658] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Finished assignment for group at generation 3: {consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016=Assignment(partitions=[t3_1-0])}
12:05:37.567 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016 for group g3_1 for generation 3. The group has 1 members, 0 of which are static.
12:05:37.573 [virtual-658] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016', protocol='range'}
12:05:37.573 [virtual-658] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Notifying assignor about the new Assignment(partitions=[t3_1-0])
12:05:37.574 [virtual-658] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Adding newly assigned partitions: [t3_1-0]
12:05:37.575 [virtual-658] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t3_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
12:05:37.579 [virtual-655] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g3_2-6
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g3_2
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

12:05:37.579 [virtual-655] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:37.582 [virtual-655] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:37.582 [virtual-655] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:37.582 [virtual-655] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327937582
12:05:37.583 [virtual-662] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Subscribed to topic(s): t3_1
12:05:37.585 [virtual-662] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:37.585 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:05:37.586 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] (Re-)joining group
12:05:37.588 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g3_2 in Empty state. Created a new member id consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad and requesting the member to rejoin with this id.
12:05:37.589 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Request joining group due to: need to re-join with the given member-id: consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad
12:05:37.589 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] (Re-)joining group
12:05:37.590 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad joins group g3_2 in Empty state. Adding to the group now.
12:05:37.590 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad with group instance id null; client reason: need to re-join with the given member-id: consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad).
12:05:40.590 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g3_2 generation 1 with 1 members.
12:05:40.591 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad', protocol='range'}
12:05:40.591 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Finished assignment for group at generation 1: {consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad=Assignment(partitions=[t3_1-0])}
12:05:40.592 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad for group g3_2 for generation 1. The group has 1 members, 0 of which are static.
12:05:40.598 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad', protocol='range'}
12:05:40.598 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Notifying assignor about the new Assignment(partitions=[t3_1-0])
12:05:40.598 [virtual-662] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Adding newly assigned partitions: [t3_1-0]
12:05:40.599 [virtual-662] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Found no committed offset for partition t3_1-0
12:05:40.602 [virtual-662] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Resetting offset for partition t3_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
12:05:40.603 [virtual-657] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.supervised$package$.$anonfun$2(supervised.scala:53)
	at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
	at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:05:40.604 [virtual-662] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:05:40.603 [virtual-661] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.supervised$package$.$anonfun$2(supervised.scala:53)
	at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
	at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:05:40.604 [virtual-658] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
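The four ERROR entries above are interruption noise from scope teardown, not broker failures: each trace bottoms out in ox's structured-concurrency internals (supervised, forkUserError/forkError, ThreadHerd, VirtualThread.run), and InterruptException is simply how KafkaConsumer.poll reports that its thread was interrupted. When a supervised scope finishes while a KafkaFlow is still polling, the scope interrupts its forks, poll() throws, and the consumers then leave their groups cleanly, which is exactly what the LeaveGroup lines that follow show. A minimal sketch of the mechanism using only ox's documented supervised/fork API (the sleeps stand in for the blocking poll and the test body):

    import ox.*

    supervised {
      val poller = fork {
        // stands in for the consumer actor blocked in KafkaConsumer.poll(...)
        while true do Thread.sleep(100)
      }
      Thread.sleep(500) // the test body completes while `poller` is still blocked
    }
    // Leaving the scope interrupts the fork's virtual thread; the blocked call
    // ends with InterruptedException, which the Kafka client wraps as
    // org.apache.kafka.common.errors.InterruptException.
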
12:05:40.604 [virtual-665] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Revoke previously assigned partitions [t3_1-0]
12:05:40.604 [virtual-664] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Revoke previously assigned partitions [t3_1-0]
12:05:40.604 [virtual-665] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Member consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:05:40.604 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Member consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:05:40.604 [virtual-665] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Resetting generation and member id due to: consumer pro-actively leaving the group
12:05:40.605 [virtual-665] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_1-5, groupId=g3_1] Request joining group due to: consumer pro-actively leaving the group
12:05:40.604 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Resetting generation and member id due to: consumer pro-actively leaving the group
12:05:40.605 [virtual-664] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g3_2-6, groupId=g3_2] Request joining group due to: consumer pro-actively leaving the group
12:05:40.605 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_1] Member consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:05:40.605 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g3_1-5-f31eeef0-ca5c-493d-8243-5c114b84f016) members.).
12:05:40.605 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_1 with generation 4 is now empty.
12:05:40.606 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g3_2] Member consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:05:40.606 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g3_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g3_2-6-919e447b-584c-4f01-8bb0-5cec7bdf07ad) members.).
12:05:40.606 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g3_2 with generation 2 is now empty.
12:05:41.091 [virtual-665] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:41.091 [virtual-665] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:41.091 [virtual-665] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:41.091 [virtual-665] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:41.093 [virtual-665] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_1-5 unregistered
12:05:41.104 [virtual-664] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:41.104 [virtual-664] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:41.104 [virtual-664] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:41.104 [virtual-664] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:41.106 [virtual-664] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g3_2-6 unregistered
12:05:41.109 [virtual-666] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-11
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 60000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:05:41.109 [virtual-666] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:41.110 [virtual-666] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-11] Instantiated an idempotent producer.
12:05:41.112 [virtual-666] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:41.112 [virtual-666] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:41.112 [virtual-666] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327941112
12:05:41.114 [data-plane-kafka-request-handler-4] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t4) to the active controller.
12:05:41.115 [kafka-producer-network-thread | producer-11] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-11] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t4=UNKNOWN_TOPIC_OR_PARTITION}
12:05:41.115 [kafka-producer-network-thread | producer-11] INFO o.a.k.c.Metadata - [Producer clientId=producer-11] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:41.115 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t4', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
12:05:41.115 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t4 with topic ID heZYt0q3SuSjPI99yQyrIQ.
12:05:41.116 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t4-0 with topic ID heZYt0q3SuSjPI99yQyrIQ and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
12:05:41.116 [kafka-producer-network-thread | producer-11] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-11] ProducerId set to 10 with epoch 0
12:05:41.119 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
12:05:41.119 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t4-0)
12:05:41.119 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t4-0 with topic id heZYt0q3SuSjPI99yQyrIQ.
12:05:41.121 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t4-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
12:05:41.122 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t4-0 in /tmp/kafka-logs4345019044203235659/t4-0 with properties {}
12:05:41.122 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t4-0 broker=0] No checkpointed highwatermark is found for partition t4-0
12:05:41.122 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t4-0 broker=0] Log loaded for partition t4-0 with initial high watermark 0
12:05:41.122 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t4-0 with topic id Some(heZYt0q3SuSjPI99yQyrIQ) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
12:05:41.222 [virtual-670] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-11] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:05:41.225 [virtual-670] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:41.225 [virtual-670] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:41.225 [virtual-670] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:41.225 [virtual-670] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:41.226 [virtual-670] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-11 unregistered
12:05:41.227 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-embedded-kafka-spec-7
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = embedded-kafka-spec
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

12:05:41.227 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:41.229 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:41.229 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:41.229 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327941229
12:05:41.229 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Subscribed to topic(s): t4
12:05:41.231 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:41.234 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:05:41.235 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] (Re-)joining group
12:05:41.237 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group embedded-kafka-spec in Empty state. Created a new member id consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e and requesting the member to rejoin with this id.
12:05:41.237 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Request joining group due to: need to re-join with the given member-id: consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e
12:05:41.237 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] (Re-)joining group
12:05:41.238 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e joins group embedded-kafka-spec in Empty state. Adding to the group now.
12:05:41.238 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e with group instance id null; client reason: need to re-join with the given member-id: consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e).
12:05:44.238 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group embedded-kafka-spec generation 3 with 1 members.
12:05:44.239 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Successfully joined group with generation Generation{generationId=3, memberId='consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e', protocol='range'}
12:05:44.239 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Finished assignment for group at generation 3: {consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e=Assignment(partitions=[t4-0])}
12:05:44.239 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e for group embedded-kafka-spec for generation 3. The group has 1 members, 0 of which are static.
12:05:44.246 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Successfully synced group in generation Generation{generationId=3, memberId='consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e', protocol='range'}
12:05:44.246 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Notifying assignor about the new Assignment(partitions=[t4-0])
12:05:44.246 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Adding newly assigned partitions: [t4-0]
12:05:44.247 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Found no committed offset for partition t4-0
12:05:44.248 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Resetting offset for partition t4-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
12:05:44.269 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Revoke previously assigned partitions [t4-0]
12:05:44.270 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Member consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:05:44.270 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Resetting generation and member id due to: consumer pro-actively leaving the group
12:05:44.270 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-embedded-kafka-spec-7, groupId=embedded-kafka-spec] Request joining group due to: consumer pro-actively leaving the group
12:05:44.270 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group embedded-kafka-spec] Member consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:05:44.271 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group embedded-kafka-spec in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-embedded-kafka-spec-7-f47012b2-cdd7-4d19-9279-9415c395da1e) members.).
12:05:44.271 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group embedded-kafka-spec with generation 4 is now empty.
12:05:44.752 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:44.752 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:44.752 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:44.752 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:44.754 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-embedded-kafka-spec-7 unregistered
12:05:44.755 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-12
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
6234 ssl.trustmanager.algorithm = PKIX
6235 ssl.truststore.certificates = null
6236 ssl.truststore.location = null
6237 ssl.truststore.password = null
6238 ssl.truststore.type = JKS
6239 transaction.timeout.ms = 60000
6240 transaction.two.phase.commit.enable = false
6241 transactional.id = null
6242 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
6243
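----
The ProducerConfig dump above is the Kafka client echoing its effective settings for producer-12. For reference, a minimal sketch of a producer with the same key non-default values, built directly against the Kafka Java client; the broker address and topic name are taken from this log, but the snippet itself is illustrative and not from the ox sources:

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
props.put(ProducerConfig.ACKS_CONFIG, "all") // logged above as acks = -1
props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
props.put(ProducerConfig.LINGER_MS_CONFIG, "5")
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000")
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)

val producer = new KafkaProducer[String, String](props)
try producer.send(new ProducerRecord("t5_1", "key", "value")).get() // block until acked
finally producer.close()
----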
12:05:44.756 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:44.756 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-12] Instantiated an idempotent producer.
12:05:44.758 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:44.758 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:44.758 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327944758
12:05:44.760 [data-plane-kafka-request-handler-6] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t5_1) to the active controller.
12:05:44.761 [kafka-producer-network-thread | producer-12] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-12] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t5_1=UNKNOWN_TOPIC_OR_PARTITION}
12:05:44.761 [kafka-producer-network-thread | producer-12] INFO o.a.k.c.Metadata - [Producer clientId=producer-12] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:44.762 [kafka-producer-network-thread | producer-12] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-12] ProducerId set to 11 with epoch 0
12:05:44.763 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t5_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
12:05:44.763 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t5_1 with topic ID nckkzEqvRD-fmmM7sLpNVg.
12:05:44.764 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t5_1-0 with topic ID nckkzEqvRD-fmmM7sLpNVg and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
12:05:44.790 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
12:05:44.790 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t5_1-0)
12:05:44.790 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t5_1-0 with topic id nckkzEqvRD-fmmM7sLpNVg.
12:05:44.792 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t5_1-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
12:05:44.792 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t5_1-0 in /tmp/kafka-logs4345019044203235659/t5_1-0 with properties {}
12:05:44.793 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_1-0 broker=0] No checkpointed highwatermark is found for partition t5_1-0
12:05:44.793 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_1-0 broker=0] Log loaded for partition t5_1-0 with initial high watermark 0
12:05:44.793 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t5_1-0 with topic id Some(nckkzEqvRD-fmmM7sLpNVg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
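----
The UNKNOWN_TOPIC_OR_PARTITION warning above is transient: the producer's first metadata request for t5_1 arrives before the topic exists, the broker's DefaultAutoTopicCreationManager asks the active controller to create it, and a later metadata retry succeeds once the partition has a leader. A hypothetical way to avoid the warning entirely is to create topics up front with the admin client; this is a sketch of that alternative, not what the test actually does:

import java.util.Properties
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}
import scala.jdk.CollectionConverters.*

val adminProps = new Properties()
adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
val admin = Admin.create(adminProps)
// one partition, replication factor 1, matching the embedded single-broker setup
try admin.createTopics(List(new NewTopic("t5_1", 1, 1.toShort)).asJava).all().get()
finally admin.close()
----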
12:05:45.773 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-12] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:05:45.774 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:45.774 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:45.774 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:45.774 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:45.775 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-12 unregistered
12:05:45.775 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-13
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:05:45.775 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:45.775 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-13] Instantiated an idempotent producer.
12:05:45.777 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:45.777 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:45.777 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327945777
12:05:45.779 [kafka-producer-network-thread | producer-13] INFO o.a.k.c.Metadata - [Producer clientId=producer-13] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:45.780 [kafka-producer-network-thread | producer-13] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-13] ProducerId set to 12 with epoch 0
12:05:45.787 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-13] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:05:45.788 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:45.789 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:45.789 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:45.789 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:45.789 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-13 unregistered
12:05:45.789 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-14
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:05:45.789 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:45.790 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-14] Instantiated an idempotent producer.
12:05:45.791 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:45.791 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:45.792 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327945791
12:05:45.794 [kafka-producer-network-thread | producer-14] INFO o.a.k.c.Metadata - [Producer clientId=producer-14] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:45.794 [kafka-producer-network-thread | producer-14] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-14] ProducerId set to 13 with epoch 0
12:05:45.801 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-14] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:05:45.802 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:45.802 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:45.802 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:45.802 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:45.803 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-14 unregistered
12:05:45.804 [virtual-675] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g5_1-8
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g5_1
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

12:05:45.804 [virtual-677] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g5_1-9
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g5_1
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

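----
The two ConsumerConfig dumps above describe the pair of consumers the test starts in group g5_1: classic group protocol, auto.offset.reset = earliest, and manual offset commits (enable.auto.commit = false). A minimal sketch of one such consumer against the Kafka Java client (illustrative only; the deserializers and broker address follow the dump, and the subscription mirrors the log lines below):

import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer
import scala.jdk.CollectionConverters.*

val props = new Properties()
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g5_1")
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)

val consumer = new KafkaConsumer[String, String](props)
consumer.subscribe(List("t5_1").asJava) // joins group g5_1, triggering the rebalance below
----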
12:05:45.804 [virtual-675] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:45.804 [virtual-677] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:45.808 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:45.808 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:45.808 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327945808
12:05:45.808 [virtual-675] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:45.808 [virtual-675] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:45.808 [virtual-675] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327945808
12:05:45.808 [virtual-681] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Subscribed to topic(s): t5_2
12:05:45.809 [virtual-677] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-15
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 60000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:05:45.809 [virtual-677] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:45.809 [virtual-677] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-15] Instantiated an idempotent producer.
12:05:45.811 [data-plane-kafka-request-handler-7] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t5_2) to the active controller.
12:05:45.811 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:45.812 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:45.812 [virtual-677] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327945811
12:05:45.812 [virtual-681] WARN o.a.k.c.NetworkClient - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] The metadata response from the cluster reported a recoverable issue with correlation id 2 : {t5_2=UNKNOWN_TOPIC_OR_PARTITION}
12:05:45.813 [virtual-681] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:45.813 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t5_2', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
12:05:45.814 [virtual-678] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Subscribed to topic(s): t5_1
12:05:45.814 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t5_2 with topic ID i27OL8z_TEKIUcS0t4-w0A.
12:05:45.814 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t5_2-0 with topic ID i27OL8z_TEKIUcS0t4-w0A and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
12:05:45.814 [virtual-681] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:05:45.816 [virtual-681] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] (Re-)joining group
12:05:45.816 [kafka-producer-network-thread | producer-15] INFO o.a.k.c.Metadata - [Producer clientId=producer-15] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:45.818 [kafka-producer-network-thread | producer-15] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-15] ProducerId set to 14 with epoch 0
12:05:45.818 [virtual-678] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:45.819 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:05:45.819 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in Empty state. Created a new member id consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588 and requesting the member to rejoin with this id.
12:05:45.820 [virtual-681] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588
12:05:45.820 [virtual-681] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] (Re-)joining group
12:05:45.820 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] (Re-)joining group
12:05:45.821 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588 joins group g5_1 in Empty state. Adding to the group now.
12:05:45.821 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588 with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588).
12:05:45.822 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in PreparingRebalance state. Created a new member id consumer-g5_1-9-17e24ee9-229b-40ae-8e5a-89d6144c3313 and requesting the member to rejoin with this id.
12:05:45.823 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-9-17e24ee9-229b-40ae-8e5a-89d6144c3313
12:05:45.823 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] (Re-)joining group
12:05:45.824 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-9-17e24ee9-229b-40ae-8e5a-89d6144c3313 joins group g5_1 in PreparingRebalance state. Adding to the group now.
12:05:45.840 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
12:05:45.840 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t5_2-0)
12:05:45.840 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t5_2-0 with topic id i27OL8z_TEKIUcS0t4-w0A.
12:05:45.842 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t5_2-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
12:05:45.842 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t5_2-0 in /tmp/kafka-logs4345019044203235659/t5_2-0 with properties {}
12:05:45.842 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_2-0 broker=0] No checkpointed highwatermark is found for partition t5_2-0
12:05:45.842 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t5_2-0 broker=0] Log loaded for partition t5_2-0 with initial high watermark 0
12:05:45.842 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t5_2-0 with topic id Some(i27OL8z_TEKIUcS0t4-w0A) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
12:05:51.821 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_1 generation 1 with 2 members.
12:05:51.821 [virtual-681] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588', protocol='range'}
12:05:51.821 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_1-9-17e24ee9-229b-40ae-8e5a-89d6144c3313', protocol='range'}
12:05:51.823 [virtual-681] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Finished assignment for group at generation 1: {consumer-g5_1-9-17e24ee9-229b-40ae-8e5a-89d6144c3313=Assignment(partitions=[t5_1-0]), consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588=Assignment(partitions=[t5_2-0])}
12:05:51.824 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588 for group g5_1 for generation 1. The group has 2 members, 0 of which are static.
12:05:51.830 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_1-9-17e24ee9-229b-40ae-8e5a-89d6144c3313', protocol='range'}
12:05:51.830 [virtual-681] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588', protocol='range'}
12:05:51.831 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_1-0])
12:05:51.831 [virtual-678] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Adding newly assigned partitions: [t5_1-0]
12:05:51.831 [virtual-681] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_2-0])
12:05:51.831 [virtual-681] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Adding newly assigned partitions: [t5_2-0]
12:05:51.832 [virtual-681] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Found no committed offset for partition t5_2-0
12:05:51.832 [virtual-678] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Found no committed offset for partition t5_1-0
12:05:51.833 [virtual-681] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Resetting offset for partition t5_2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
12:05:51.835 [virtual-678] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Resetting offset for partition t5_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
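----
At this point the classic-protocol handshake seen above is complete: both members joined generation 1, the group leader (consumer-g5_1-8) computed a range assignment giving each consumer one partition, the group synced, and, with no committed offsets, both positions were reset to offset 0 per auto.offset.reset = earliest. With enable.auto.commit = false, a consumer like the one sketched earlier would then poll and commit explicitly, roughly as follows (a hypothetical loop, not the test's code; in ox this is wrapped by KafkaFlow):

import java.time.Duration
import scala.jdk.CollectionConverters.*

while true do
  // may throw o.a.k.common.errors.InterruptException if the thread is interrupted
  val records = consumer.poll(Duration.ofMillis(500))
  records.asScala.foreach(r => println(s"${r.topic}-${r.partition}@${r.offset}: ${r.value}"))
  consumer.commitSync() // manual commit, since auto-commit is disabled
----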
12:05:53.848 [virtual-678] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:05:53.848 [virtual-684] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
	at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
	at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
	at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
	at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
	at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
	at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
	at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:05:53.848 [virtual-681] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:05:53.848 [virtual-680] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.supervised$package$.$anonfun$2(supervised.scala:53)
	at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
	at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
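----
The four ERROR entries above are shutdown noise rather than test failures: when the test's scope ends, ox interrupts the virtual threads (the virtual-NNN thread names) still blocked in KafkaConsumer.poll, and the Kafka client rethrows the InterruptedException as org.apache.kafka.common.errors.InterruptException, which KafkaConsumerWrapper and KafkaFlow then log before cleanup proceeds. A minimal sketch of the underlying mechanism, using ox's supervised/fork (the entry points visible in the stack traces); the sleep is a stand-in for the blocking poll, and the semantics assumed are that daemon forks are interrupted when the scope's main body completes:

import ox.{fork, supervised}

@main def shutdownInterrupt(): Unit =
  supervised {
    fork {
      // stand-in for consumer.poll(...): a blocking call on a daemon fork
      try Thread.sleep(60000)
      catch case _: InterruptedException => println("poll interrupted on scope shutdown")
    }
    Thread.sleep(100)
    // when the scope's main body ends here, remaining daemon forks are interrupted
  }
----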
12:05:53.849 [virtual-690] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-15] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:05:53.849 [virtual-691] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Revoke previously assigned partitions [t5_1-0]
12:05:53.849 [virtual-691] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Member consumer-g5_1-9-17e24ee9-229b-40ae-8e5a-89d6144c3313 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:05:53.850 [virtual-691] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
12:05:53.850 [virtual-691] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-9, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
12:05:53.850 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-9-17e24ee9-229b-40ae-8e5a-89d6144c3313 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:05:53.851 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g5_1-9-17e24ee9-229b-40ae-8e5a-89d6144c3313) members.).
12:05:53.851 [virtual-692] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Revoke previously assigned partitions [t5_2-0]
12:05:53.852 [virtual-692] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Member consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:05:53.852 [virtual-692] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
12:05:53.852 [virtual-692] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-8, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
12:05:53.852 [virtual-690] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:53.852 [virtual-690] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:53.852 [virtual-690] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:53.852 [virtual-690] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:53.852 [virtual-690] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-15 unregistered
12:05:53.853 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-8-59a8ae10-40a2-4997-a6a4-2912527e8588 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:05:53.853 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_1 with generation 2 is now empty.
12:05:53.854 [virtual-691] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:53.854 [virtual-691] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:53.854 [virtual-691] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:53.854 [virtual-691] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:53.856 [virtual-691] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-9 unregistered
12:05:53.860 [virtual-692] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:53.860 [virtual-692] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:53.860 [virtual-692] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:53.861 [virtual-692] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:53.862 [virtual-692] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-8 unregistered
12:05:53.863 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-16
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:05:53.864 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:53.864 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-16] Instantiated an idempotent producer.
12:05:53.866 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:53.866 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:53.866 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327953866
12:05:53.869 [kafka-producer-network-thread | producer-16] INFO o.a.k.c.Metadata - [Producer clientId=producer-16] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:53.869 [kafka-producer-network-thread | producer-16] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-16] ProducerId set to 15 with epoch 0
12:05:53.877 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-16] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:05:53.879 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:05:53.879 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:05:53.879 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:05:53.879 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:05:53.879 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-16 unregistered
12:05:53.880 [virtual-694] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_1-10
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

12:05:53.880 [virtual-694] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:53.882 [virtual-694] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:53.882 [virtual-694] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:53.882 [virtual-694] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327953882
12:05:53.883 [virtual-697] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Subscribed to topic(s): t5_1
12:05:53.885 [virtual-697] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:53.886 [virtual-697] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:05:53.887 [virtual-697] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] (Re-)joining group
12:05:53.890 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_1 in Empty state. Created a new member id consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641 and requesting the member to rejoin with this id.
12:05:53.890 [virtual-697] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Request joining group due to: need to re-join with the given member-id: consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641
12:05:53.890 [virtual-697] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] (Re-)joining group
12:05:53.891 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641 joins group g5_1 in Empty state. Adding to the group now.
12:05:53.891 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641 with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641).
12:05:56.891 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_1 generation 3 with 1 members.
12:05:56.891 [virtual-697] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641', protocol='range'}
12:05:56.891 [virtual-697] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Finished assignment for group at generation 3: {consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641=Assignment(partitions=[t5_1-0])}
12:05:56.892 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641 for group g5_1 for generation 3. The group has 1 members, 0 of which are static.
12:05:56.898 [virtual-697] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641', protocol='range'}
12:05:56.899 [virtual-697] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Notifying assignor about the new Assignment(partitions=[t5_1-0])
12:05:56.899 [virtual-697] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Adding newly assigned partitions: [t5_1-0]
12:05:56.900 [virtual-697] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t5_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
12:05:56.904 [virtual-694] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g5_2-11
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g5_2
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

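----
The two ConsumerConfig dumps above are identical except for client.id and group.id (g5_1 vs g5_2). In terms of the plain Kafka client API, the non-default keys correspond roughly to the following construction (a sketch; values copied from the dump, everything else left at Kafka defaults):

  import java.util.Properties
  import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
  import scala.jdk.CollectionConverters.*

  val props = new Properties()
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  props.put(ConsumerConfig.GROUP_ID_CONFIG, "g5_2")
  // auto.offset.reset = earliest is why, when no committed offset exists, the
  // log below shows "Resetting offset for partition t5_1-0 to position
  // FetchPosition{offset=0, ...}"
  props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
  props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")

  val consumer = new KafkaConsumer[String, String](props)
  consumer.subscribe(List("t5_1").asJava) // "Subscribed to topic(s): t5_1" above
----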
12:05:56.904 [virtual-694] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:05:56.907 [virtual-694] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:05:56.907 [virtual-694] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:05:56.907 [virtual-694] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327956907
12:05:56.908 [virtual-701] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Subscribed to topic(s): t5_1
12:05:56.910 [virtual-701] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:05:56.910 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:05:56.911 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] (Re-)joining group
12:05:56.913 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g5_2 in Empty state. Created a new member id consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485 and requesting the member to rejoin with this id.
12:05:56.914 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Request joining group due to: need to re-join with the given member-id: consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485
12:05:56.914 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] (Re-)joining group
12:05:56.915 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485 joins group g5_2 in Empty state. Adding to the group now.
12:05:56.915 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485 with group instance id null; client reason: need to re-join with the given member-id: consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485).
12:05:59.915 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g5_2 generation 1 with 1 members.
12:05:59.915 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485', protocol='range'}
12:05:59.916 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Finished assignment for group at generation 1: {consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485=Assignment(partitions=[t5_1-0])}
12:05:59.916 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485 for group g5_2 for generation 1. The group has 1 members, 0 of which are static.
12:05:59.923 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485', protocol='range'}
12:05:59.923 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Notifying assignor about the new Assignment(partitions=[t5_1-0])
12:05:59.923 [virtual-701] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Adding newly assigned partitions: [t5_1-0]
12:05:59.924 [virtual-701] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Found no committed offset for partition t5_1-0
12:05:59.926 [virtual-701] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Resetting offset for partition t5_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
12:05:59.928 [virtual-696] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:05:59.928 [virtual-700] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:05:59.928 [virtual-701] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:05:59.928 [virtual-697] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:05:59.929 [virtual-703] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Revoke previously assigned partitions [t5_1-0]
12:05:59.929 [virtual-703] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Member consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:05:59.929 [virtual-703] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Resetting generation and member id due to: consumer pro-actively leaving the group
12:05:59.930 [virtual-703] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_1-10, groupId=g5_1] Request joining group due to: consumer pro-actively leaving the group
12:05:59.929 [virtual-704] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Revoke previously assigned partitions [t5_1-0]
12:05:59.930 [virtual-704] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Member consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:05:59.930 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_1] Member consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:05:59.930 [virtual-704] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Resetting generation and member id due to: consumer pro-actively leaving the group
12:05:59.930 [virtual-704] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g5_2-11, groupId=g5_2] Request joining group due to: consumer pro-actively leaving the group
12:05:59.930 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g5_1-10-b946200a-bf41-435d-8a01-97e48d926641) members.).
12:05:59.930 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_1 with generation 4 is now empty.
12:05:59.930 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g5_2] Member consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:05:59.931 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g5_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g5_2-11-27bc821b-cc67-4ef5-bb01-aa79cdc1f485) members.).
12:05:59.931 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g5_2 with generation 2 is now empty.
12:06:00.413 [virtual-703] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:00.413 [virtual-703] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:00.413 [virtual-703] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:00.413 [virtual-703] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:00.415 [virtual-703] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_1-10 unregistered
12:06:00.429 [virtual-704] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:00.429 [virtual-704] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:00.429 [virtual-704] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:00.429 [virtual-704] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:00.431 [virtual-704] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g5_2-11 unregistered
12:06:00.433 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-17
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

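----
For reference, the repeated ProducerConfig dumps in this run correspond roughly to the following construction (a sketch with only the non-default keys from the dump; the key and value below are placeholders, while t6_1 is the topic whose auto-creation the broker logs next):

  import java.util.Properties
  import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}

  val props = new Properties()
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
  props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") // hence "Instantiated an idempotent producer"
  props.put(ProducerConfig.LINGER_MS_CONFIG, "5")
  props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000")
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")

  val producer = new KafkaProducer[String, String](props)
  // The first send triggers a metadata fetch; with broker-side auto-creation
  // enabled this yields the UNKNOWN_TOPIC_OR_PARTITION warning and the
  // CreateTopics / PartitionRecord entries that follow in the log
  producer.send(new ProducerRecord("t6_1", "key", "value"))
  producer.close()
----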
12:06:00.433 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:00.434 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-17] Instantiated an idempotent producer.
12:06:00.436 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:00.436 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:00.436 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327960436
12:06:00.438 [data-plane-kafka-request-handler-3] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t6_1) to the active controller.
12:06:00.439 [kafka-producer-network-thread | producer-17] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-17] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t6_1=UNKNOWN_TOPIC_OR_PARTITION}
12:06:00.439 [kafka-producer-network-thread | producer-17] INFO o.a.k.c.Metadata - [Producer clientId=producer-17] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:00.440 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t6_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
12:06:00.440 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t6_1 with topic ID shh6FeOnRUqapyt3hbyVuQ.
12:06:00.441 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t6_1-0 with topic ID shh6FeOnRUqapyt3hbyVuQ and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
12:06:00.439 [kafka-producer-network-thread | producer-17] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-17] ProducerId set to 16 with epoch 0
12:06:00.466 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
12:06:00.466 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t6_1-0)
12:06:00.467 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t6_1-0 with topic id shh6FeOnRUqapyt3hbyVuQ.
12:06:00.469 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t6_1-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
12:06:00.469 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t6_1-0 in /tmp/kafka-logs4345019044203235659/t6_1-0 with properties {}
12:06:00.470 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t6_1-0 broker=0] No checkpointed highwatermark is found for partition t6_1-0
12:06:00.470 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t6_1-0 broker=0] Log loaded for partition t6_1-0 with initial high watermark 0
12:06:00.470 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t6_1-0 with topic id Some(shh6FeOnRUqapyt3hbyVuQ) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
12:06:01.448 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-17] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:01.450 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:01.450 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:01.450 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:01.450 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:01.450 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-17 unregistered
12:06:01.451 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-18
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:06:01.451 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:01.451 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-18] Instantiated an idempotent producer.
12:06:01.453 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:01.453 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:01.453 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327961453
12:06:01.455 [kafka-producer-network-thread | producer-18] INFO o.a.k.c.Metadata - [Producer clientId=producer-18] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:01.456 [kafka-producer-network-thread | producer-18] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-18] ProducerId set to 17 with epoch 0
12:06:01.464 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-18] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:01.465 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:01.465 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:01.465 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:01.465 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:01.466 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-18 unregistered
12:06:01.466 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-19
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
7918 ssl.trustmanager.algorithm = PKIX
7919 ssl.truststore.certificates = null
7920 ssl.truststore.location = null
7921 ssl.truststore.password = null
7922 ssl.truststore.type = JKS
7923 transaction.timeout.ms = 60000
7924 transaction.two.phase.commit.enable = false
7925 transactional.id = null
7926 value.serializer = class org.apache.kafka.common.serialization.StringSerializer
7927
792812:06:01.466 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
792912:06:01.466 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-19] Instantiated an idempotent producer.
793012:06:01.468 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
793112:06:01.468 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
793212:06:01.468 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327961468
793312:06:01.471 [kafka-producer-network-thread | producer-19] INFO o.a.k.c.Metadata - [Producer clientId=producer-19] Cluster ID: cERjULLDRBGv7lPJWPu8sA
793412:06:01.472 [kafka-producer-network-thread | producer-19] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-19] ProducerId set to 18 with epoch 0
793512:06:01.479 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-19] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
793612:06:01.480 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
793712:06:01.480 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
793812:06:01.481 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
793912:06:01.481 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
794012:06:01.481 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-19 unregistered
794112:06:01.482 [virtual-710] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
7942 allow.auto.create.topics = true
7943 auto.commit.interval.ms = 5000
7944 auto.offset.reset = earliest
7945 bootstrap.servers = [localhost:6001]
7946 check.crcs = true
7947 client.dns.lookup = use_all_dns_ips
7948 client.id = consumer-g6_1-12
7949 client.rack =
7950 connections.max.idle.ms = 540000
7951 default.api.timeout.ms = 60000
7952 enable.auto.commit = false
7953 enable.metrics.push = true
7954 exclude.internal.topics = true
7955 fetch.max.bytes = 52428800
7956 fetch.max.wait.ms = 500
7957 fetch.min.bytes = 1
7958 group.id = g6_1
7959 group.instance.id = null
7960 group.protocol = classic
7961 group.remote.assignor = null
7962 heartbeat.interval.ms = 3000
7963 interceptor.classes = []
7964 internal.leave.group.on.close = true
7965 internal.throw.on.fetch.stable.offset.unsupported = false
7966 isolation.level = read_uncommitted
7967 key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
7968 max.partition.fetch.bytes = 1048576
7969 max.poll.interval.ms = 300000
7970 max.poll.records = 500
7971 metadata.max.age.ms = 300000
7972 metadata.recovery.rebootstrap.trigger.ms = 300000
7973 metadata.recovery.strategy = rebootstrap
7974 metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
7975 metrics.num.samples = 2
7976 metrics.recording.level = INFO
7977 metrics.sample.window.ms = 30000
7978 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
7979 receive.buffer.bytes = 65536
7980 reconnect.backoff.max.ms = 1000
7981 reconnect.backoff.ms = 50
7982 request.timeout.ms = 30000
7983 retry.backoff.max.ms = 1000
7984 retry.backoff.ms = 100
7985 sasl.client.callback.handler.class = null
7986 sasl.jaas.config = null
7987 sasl.kerberos.kinit.cmd = /usr/bin/kinit
7988 sasl.kerberos.min.time.before.relogin = 60000
7989 sasl.kerberos.service.name = null
7990 sasl.kerberos.ticket.renew.jitter = 0.05
7991 sasl.kerberos.ticket.renew.window.factor = 0.8
7992 sasl.login.callback.handler.class = null
7993 sasl.login.class = null
7994 sasl.login.connect.timeout.ms = null
7995 sasl.login.read.timeout.ms = null
7996 sasl.login.refresh.buffer.seconds = 300
7997 sasl.login.refresh.min.period.seconds = 60
7998 sasl.login.refresh.window.factor = 0.8
7999 sasl.login.refresh.window.jitter = 0.05
8000 sasl.login.retry.backoff.max.ms = 10000
8001 sasl.login.retry.backoff.ms = 100
8002 sasl.mechanism = GSSAPI
8003 sasl.oauthbearer.assertion.algorithm = RS256
8004 sasl.oauthbearer.assertion.claim.aud = null
8005 sasl.oauthbearer.assertion.claim.exp.seconds = 300
8006 sasl.oauthbearer.assertion.claim.iss = null
8007 sasl.oauthbearer.assertion.claim.jti.include = false
8008 sasl.oauthbearer.assertion.claim.nbf.seconds = 60
8009 sasl.oauthbearer.assertion.claim.sub = null
8010 sasl.oauthbearer.assertion.file = null
8011 sasl.oauthbearer.assertion.private.key.file = null
8012 sasl.oauthbearer.assertion.private.key.passphrase = null
8013 sasl.oauthbearer.assertion.template.file = null
8014 sasl.oauthbearer.client.credentials.client.id = null
8015 sasl.oauthbearer.client.credentials.client.secret = null
8016 sasl.oauthbearer.clock.skew.seconds = 30
8017 sasl.oauthbearer.expected.audience = null
8018 sasl.oauthbearer.expected.issuer = null
8019 sasl.oauthbearer.header.urlencode = false
8020 sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
8021 sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
8022 sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
8023 sasl.oauthbearer.jwks.endpoint.url = null
8024 sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
8025 sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
8026 sasl.oauthbearer.scope = null
8027 sasl.oauthbearer.scope.claim.name = scope
8028 sasl.oauthbearer.sub.claim.name = sub
8029 sasl.oauthbearer.token.endpoint.url = null
8030 security.protocol = PLAINTEXT
8031 security.providers = null
8032 send.buffer.bytes = 131072
8033 session.timeout.ms = 45000
8034 share.acknowledgement.mode = implicit
8035 socket.connection.setup.timeout.max.ms = 30000
8036 socket.connection.setup.timeout.ms = 10000
8037 ssl.cipher.suites = null
8038 ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
8039 ssl.endpoint.identification.algorithm = https
8040 ssl.engine.factory.class = null
8041 ssl.key.password = null
8042 ssl.keymanager.algorithm = SunX509
8043 ssl.keystore.certificate.chain = null
8044 ssl.keystore.key = null
8045 ssl.keystore.location = null
8046 ssl.keystore.password = null
8047 ssl.keystore.type = JKS
8048 ssl.protocol = TLSv1.3
8049 ssl.provider = null
8050 ssl.secure.random.implementation = null
8051 ssl.trustmanager.algorithm = PKIX
8052 ssl.truststore.certificates = null
8053 ssl.truststore.location = null
8054 ssl.truststore.password = null
8055 ssl.truststore.type = JKS
8056 value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
8057
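Note: the subscribe/rebalance sequence that follows is driven by ox's KafkaFlow (visible in the stack traces further down). A rough sketch of the consuming side, assuming ox's documented ConsumerSettings/KafkaFlow API and using the group/topic ids from this log:

import ox.supervised
import ox.kafka.*
import ox.kafka.ConsumerSettings.AutoOffsetReset

// Hypothetical consumption loop; g6_1 / t6_1 are the ids visible in the log.
val settings = ConsumerSettings
  .default("g6_1")                          // group.id = g6_1
  .bootstrapServers("localhost:6001")
  .autoOffsetReset(AutoOffsetReset.Earliest) // auto.offset.reset = earliest

supervised {
  KafkaFlow
    .subscribe(settings, "t6_1")  // logs "Subscribed to topic(s): t6_1"
    .take(3)                      // stop after the expected records (assumed)
    .runForeach(msg => println(msg.value))
}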
12:06:01.482 [virtual-710] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:01.485 [virtual-710] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:01.485 [virtual-710] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:01.485 [virtual-710] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327961485
12:06:01.489 [virtual-711] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Subscribed to topic(s): t6_1
12:06:01.492 [virtual-711] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:01.492 [virtual-711] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:06:01.494 [virtual-711] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] (Re-)joining group
12:06:01.496 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_1 in Empty state. Created a new member id consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7 and requesting the member to rejoin with this id.
12:06:01.496 [virtual-711] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Request joining group due to: need to re-join with the given member-id: consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7
12:06:01.496 [virtual-711] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] (Re-)joining group
12:06:01.497 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7 joins group g6_1 in Empty state. Adding to the group now.
12:06:01.497 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7).
12:06:04.498 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_1 generation 1 with 1 members.
12:06:04.498 [virtual-711] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7', protocol='range'}
12:06:04.499 [virtual-711] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Finished assignment for group at generation 1: {consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7=Assignment(partitions=[t6_1-0])}
12:06:04.499 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7 for group g6_1 for generation 1. The group has 1 members, 0 of which are static.
12:06:04.505 [virtual-711] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7', protocol='range'}
12:06:04.505 [virtual-711] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Notifying assignor about the new Assignment(partitions=[t6_1-0])
12:06:04.505 [virtual-711] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Adding newly assigned partitions: [t6_1-0]
12:06:04.505 [virtual-711] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Found no committed offset for partition t6_1-0
12:06:04.508 [virtual-711] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Resetting offset for partition t6_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
12:06:06.511 [virtual-711] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
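Note: the trace above shows how ox serializes access to the non-thread-safe KafkaConsumer: KafkaConsumerWrapper.poll runs on a dedicated fork created by Actor.create, and callers reach it through ActorRef.ask. A minimal sketch of that actor pattern with a stand-in resource (the Counter class is hypothetical, not part of ox):

import ox.supervised
import ox.channels.Actor

// Stand-in for a non-thread-safe resource such as KafkaConsumer.
class Counter { private var n = 0; def increment(): Int = { n += 1; n } }

supervised {
  val ref = Actor.create(new Counter)  // corresponds to Actor$.create in the trace
  val v = ref.ask(_.increment())       // corresponds to ActorRef.ask in the trace
  println(v)                           // prints 1; all calls run on the actor's fork
}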
12:06:06.511 [virtual-713] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
    at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
    at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
    at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
    at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
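Note: these ERRORs are shutdown noise rather than test failures. When the test body completes, ox ends the enclosing supervised scope and cancels its remaining forks by interrupting their virtual threads; a fork blocked in KafkaConsumer.poll then sees InterruptedException, which the Kafka client wraps in InterruptException. A minimal sketch of that cancellation mechanism, independent of Kafka:

import ox.{fork, sleep, supervised}
import scala.concurrent.duration.*

// When a supervised scope's main body completes, still-running daemon forks
// are cancelled by interrupting their virtual threads -- which is what the
// traces above show happening to the polling fork.
supervised {
  fork {
    try sleep(1.hour) // stand-in for a blocking KafkaConsumer.poll
    catch { case e: InterruptedException => println(s"poll interrupted: $e") }
  }
  sleep(100.millis) // main body ends here -> the fork above is interrupted
}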
12:06:06.511 [virtual-718] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Revoke previously assigned partitions [t6_1-0]
12:06:06.512 [virtual-718] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Member consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:06:06.512 [virtual-718] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Resetting generation and member id due to: consumer pro-actively leaving the group
12:06:06.512 [virtual-718] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-12, groupId=g6_1] Request joining group due to: consumer pro-actively leaving the group
12:06:06.512 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_1] Member consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:06:06.512 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g6_1-12-0c31f7e8-d91b-4d37-a40b-b01274e4afd7) members.).
12:06:06.512 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_1 with generation 2 is now empty.
12:06:06.519 [virtual-718] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:06.519 [virtual-718] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:06.519 [virtual-718] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:06.519 [virtual-718] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:06.521 [virtual-718] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_1-12 unregistered
12:06:06.522 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-20
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:06:06.522 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:06.522 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-20] Instantiated an idempotent producer.
12:06:06.524 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:06.524 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:06.524 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327966524
12:06:06.526 [kafka-producer-network-thread | producer-20] INFO o.a.k.c.Metadata - [Producer clientId=producer-20] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:06.526 [kafka-producer-network-thread | producer-20] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-20] ProducerId set to 19 with epoch 0
12:06:06.535 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-20] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:06.537 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:06.537 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:06.537 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:06.537 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:06.537 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-20 unregistered
12:06:06.538 [virtual-720] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g6_1-13
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g6_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

12:06:06.538 [virtual-720] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:06.540 [virtual-720] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:06.541 [virtual-720] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:06.541 [virtual-720] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327966540
12:06:06.541 [virtual-723] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Subscribed to topic(s): t6_1
12:06:06.544 [virtual-723] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:06.545 [virtual-723] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:06:06.545 [virtual-723] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] (Re-)joining group
12:06:06.547 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_1 in Empty state. Created a new member id consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1 and requesting the member to rejoin with this id.
12:06:06.547 [virtual-723] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Request joining group due to: need to re-join with the given member-id: consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1
12:06:06.548 [virtual-723] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] (Re-)joining group
12:06:06.548 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1 joins group g6_1 in Empty state. Adding to the group now.
12:06:06.548 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1).
12:06:09.548 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_1 generation 3 with 1 members.
12:06:09.549 [virtual-723] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1', protocol='range'}
12:06:09.549 [virtual-723] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Finished assignment for group at generation 3: {consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1=Assignment(partitions=[t6_1-0])}
12:06:09.550 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1 for group g6_1 for generation 3. The group has 1 members, 0 of which are static.
12:06:09.555 [virtual-723] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1', protocol='range'}
12:06:09.556 [virtual-723] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Notifying assignor about the new Assignment(partitions=[t6_1-0])
12:06:09.556 [virtual-723] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Adding newly assigned partitions: [t6_1-0]
12:06:09.557 [virtual-723] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t6_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
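Note the contrast with the earlier run: consumer-g6_1-12 found no committed offset and was reset to 0, while consumer-g6_1-13 resumes from the committed offset 3. With enable.auto.commit = false, that position can only have survived because the test committed offsets explicitly. An illustrative sketch with the plain client (the pollAndCommit helper is hypothetical; `consumer` stands for a configured, subscribed KafkaConsumer[String, String]):

import java.time.Duration
import scala.jdk.CollectionConverters.*
import org.apache.kafka.clients.consumer.KafkaConsumer

// With enable.auto.commit = false, positions survive a restart only if the
// application commits them itself, e.g. synchronously after processing a batch.
def pollAndCommit(consumer: KafkaConsumer[String, String]): Unit = {
  val records = consumer.poll(Duration.ofSeconds(1))
  records.asScala.foreach(r => println(s"${r.offset}: ${r.value}"))
  consumer.commitSync() // a later consumer in the same group resumes from here
}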
12:06:09.561 [virtual-720] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g6_2-14
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g6_2
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

12:06:09.561 [virtual-720] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:09.564 [virtual-720] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:09.564 [virtual-720] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:09.564 [virtual-720] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327969564
12:06:09.564 [virtual-727] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Subscribed to topic(s): t6_1
12:06:09.567 [virtual-727] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:09.567 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:06:09.568 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] (Re-)joining group
12:06:09.570 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g6_2 in Empty state. Created a new member id consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531 and requesting the member to rejoin with this id.
12:06:09.570 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Request joining group due to: need to re-join with the given member-id: consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531
12:06:09.570 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] (Re-)joining group
12:06:09.570 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531 joins group g6_2 in Empty state. Adding to the group now.
12:06:09.570 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531 with group instance id null; client reason: need to re-join with the given member-id: consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531).
12:06:10.083 [quorum-controller-0-event-handler] INFO o.a.k.c.EventPerformanceMonitor - [QuorumController id=0] In the last 60000 ms period, 350 controller events were completed, which took an average of 9.99 ms each. The slowest event was createTopics(439688223), which took 37.28 ms.
12:06:12.571 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g6_2 generation 1 with 1 members.
12:06:12.571 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531', protocol='range'}
12:06:12.571 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Finished assignment for group at generation 1: {consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531=Assignment(partitions=[t6_1-0])}
12:06:12.572 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531 for group g6_2 for generation 1. The group has 1 members, 0 of which are static.
12:06:12.578 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531', protocol='range'}
12:06:12.578 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Notifying assignor about the new Assignment(partitions=[t6_1-0])
12:06:12.578 [virtual-727] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Adding newly assigned partitions: [t6_1-0]
12:06:12.580 [virtual-727] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Found no committed offset for partition t6_1-0
12:06:12.582 [virtual-727] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Resetting offset for partition t6_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
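Note: group g6_2 is new and has no committed offsets, so auto.offset.reset = earliest rewinds it to offset 0 and it re-reads what g6_1 already consumed; the default (latest) would deliver only records produced after it joined. Illustratively, the single setting behind the "Resetting offset ... FetchPosition{offset=0, ...}" line above:

import java.util.Properties
import org.apache.kafka.clients.consumer.ConsumerConfig

// Illustrative only: what a group with no committed offsets does on first poll.
val props = new Properties()
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g6_2")
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // vs. "latest" (default)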
854112:06:12.584 [virtual-722] ERROR o.k.KafkaFlow$ - Exception when polling for records
8542java.lang.InterruptedException: null
8543 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
8544 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
8545 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
8546 at ox.channels.ActorRef.ask(actor.scala:64)
8547 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
8548 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
8549 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
8550 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
8551 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
8552 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
8553 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
8554 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8555 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
8556 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
855712:06:12.584 [virtual-726] ERROR o.k.KafkaFlow$ - Exception when polling for records
8558java.lang.InterruptedException: null
8559 at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
8560 at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
8561 at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
8562 at ox.channels.ActorRef.ask(actor.scala:64)
8563 at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
8564 at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
8565 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
8566 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
8567 at ox.supervised$package$.$anonfun$2(supervised.scala:53)
8568 at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
8569 at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
8570 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8571 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
8572 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
857312:06:12.584 [virtual-727] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
8574java.lang.InterruptedException: null
8575 ... 18 common frames omitted
8576Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
8577 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
8578 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
8579 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
8580 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
8581 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
8582 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
8583 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
8584 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
8585 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
8586 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
8587 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
8588 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
8589 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
8590 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
8591 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
8592 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8593 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
8594 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
859512:06:12.584 [virtual-723] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
8596java.lang.InterruptedException: null
8597 ... 18 common frames omitted
8598Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
8599 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
8600 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
8601 at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
8602 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
8603 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
8604 at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
8605 at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
8606 at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
8607 at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
8608 at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
8609 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
8610 at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
8611 at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
8612 at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
8613 at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
8614 at scala.Function0.apply$mcV$sp(Function0.scala:45)
8615 at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
8616 at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:06:12.585 [virtual-730] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Revoke previously assigned partitions [t6_1-0]
12:06:12.585 [virtual-730] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Member consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:06:12.585 [virtual-730] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Resetting generation and member id due to: consumer pro-actively leaving the group
12:06:12.585 [virtual-730] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_1-13, groupId=g6_1] Request joining group due to: consumer pro-actively leaving the group
12:06:12.585 [virtual-729] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Revoke previously assigned partitions [t6_1-0]
12:06:12.585 [virtual-729] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Member consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:06:12.585 [virtual-729] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Resetting generation and member id due to: consumer pro-actively leaving the group
12:06:12.585 [virtual-729] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g6_2-14, groupId=g6_2] Request joining group due to: consumer pro-actively leaving the group
12:06:12.585 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_1] Member consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:06:12.585 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g6_1-13-e312281c-c232-47a4-af78-49cccd0be9f1) members.).
12:06:12.585 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_1 with generation 4 is now empty.
12:06:12.586 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g6_2] Member consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:06:12.586 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g6_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g6_2-14-54b77ba3-754d-4802-b628-954bf5950531) members.).
12:06:12.586 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g6_2 with generation 2 is now empty.
12:06:13.070 [virtual-730] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:13.070 [virtual-730] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:13.070 [virtual-730] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:13.070 [virtual-730] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:13.072 [virtual-730] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_1-13 unregistered
12:06:13.085 [virtual-729] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:13.085 [virtual-729] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:13.085 [virtual-729] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:13.085 [virtual-729] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:13.086 [virtual-729] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g6_2-14 unregistered
12:06:13.088 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-21
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

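For orientation, the settings that matter for this test are acks = -1 (i.e. "all"), enable.idempotence = true and linger.ms = 5; the remainder are client defaults. A minimal standalone sketch of an equivalent producer against the plain Kafka Java client (not the test's actual wiring; the broker address and topic name t7_1 are taken from the log above):

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001") // as in the log
props.put(ProducerConfig.ACKS_CONFIG, "all")                         // acks = -1
props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")          // dedupe on broker-side retries
props.put(ProducerConfig.LINGER_MS_CONFIG, "5")                      // small batching window
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)

val producer = new KafkaProducer[String, String](props)
try producer.send(new ProducerRecord("t7_1", "key", "value")).get() // block for the ack, for simplicity
finally producer.close()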
12:06:13.088 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:13.089 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-21] Instantiated an idempotent producer.
12:06:13.090 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:13.090 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:13.090 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327973090
12:06:13.093 [data-plane-kafka-request-handler-1] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t7_1) to the active controller.
12:06:13.094 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t7_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
12:06:13.094 [kafka-producer-network-thread | producer-21] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-21] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t7_1=UNKNOWN_TOPIC_OR_PARTITION}
12:06:13.094 [kafka-producer-network-thread | producer-21] INFO o.a.k.c.Metadata - [Producer clientId=producer-21] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:13.094 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t7_1 with topic ID yFHmdld6Q6Gn9iq-AGlDCA.
12:06:13.094 [kafka-producer-network-thread | producer-21] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-21] ProducerId set to 20 with epoch 0
12:06:13.094 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t7_1-0 with topic ID yFHmdld6Q6Gn9iq-AGlDCA and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
12:06:13.120 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
12:06:13.121 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t7_1-0)
12:06:13.121 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t7_1-0 with topic id yFHmdld6Q6Gn9iq-AGlDCA.
12:06:13.124 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t7_1-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
12:06:13.124 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t7_1-0 in /tmp/kafka-logs4345019044203235659/t7_1-0 with properties {}
12:06:13.124 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t7_1-0 broker=0] No checkpointed highwatermark is found for partition t7_1-0
12:06:13.124 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t7_1-0 broker=0] Log loaded for partition t7_1-0 with initial high watermark 0
12:06:13.124 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t7_1-0 with topic id Some(yFHmdld6Q6Gn9iq-AGlDCA) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
12:06:14.105 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-21] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:14.106 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:14.107 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:14.107 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:14.107 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:14.107 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-21 unregistered
12:06:14.107 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-22
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:06:14.108 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:14.108 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-22] Instantiated an idempotent producer.
12:06:14.110 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:14.110 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:14.110 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327974110
12:06:14.112 [kafka-producer-network-thread | producer-22] INFO o.a.k.c.Metadata - [Producer clientId=producer-22] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:14.112 [kafka-producer-network-thread | producer-22] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-22] ProducerId set to 21 with epoch 0
12:06:14.120 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-22] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:14.122 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:14.122 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:14.122 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:14.122 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:14.122 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-22 unregistered
12:06:14.123 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-23
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:06:14.123 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:14.123 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-23] Instantiated an idempotent producer.
12:06:14.125 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:14.125 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:14.125 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327974125
12:06:14.127 [kafka-producer-network-thread | producer-23] INFO o.a.k.c.Metadata - [Producer clientId=producer-23] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:14.127 [kafka-producer-network-thread | producer-23] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-23] ProducerId set to 22 with epoch 0
12:06:14.135 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-23] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:14.136 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:14.136 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:14.136 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:14.136 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:14.137 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-23 unregistered
12:06:14.139 [virtual-736] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g7_1-15
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g7_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

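This consumer disables auto-commit (enable.auto.commit = false) and falls back to the earliest offset when no committed position exists (auto.offset.reset = earliest), which explains the offset reset logged a few lines below. A minimal standalone sketch of an equivalent consumer with an explicit commit, against the plain Kafka Java client and reusing the group/topic names from the log (not the test's actual ox-kafka wiring):

import java.time.Duration
import java.util.Properties
import scala.jdk.CollectionConverters.*
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer

val props = new Properties()
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g7_1")
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // used only if no committed offset exists
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")   // offsets are committed explicitly below
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)

val consumer = new KafkaConsumer[String, String](props)
consumer.subscribe(java.util.List.of("t7_1"))
try
  val records = consumer.poll(Duration.ofSeconds(1))
  records.asScala.foreach(r => println(s"${r.offset}: ${r.value}"))
  consumer.commitSync() // record the consumed position for group g7_1
finally consumer.close()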
12:06:14.140 [virtual-736] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:14.144 [virtual-736] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:14.144 [virtual-736] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:14.144 [virtual-736] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327974144
12:06:14.145 [virtual-737] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Subscribed to topic(s): t7_1
12:06:14.148 [virtual-737] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:14.149 [virtual-737] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:06:14.150 [virtual-737] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] (Re-)joining group
12:06:14.152 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_1 in Empty state. Created a new member id consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e and requesting the member to rejoin with this id.
12:06:14.152 [virtual-737] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Request joining group due to: need to re-join with the given member-id: consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e
12:06:14.153 [virtual-737] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] (Re-)joining group
12:06:14.153 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e joins group g7_1 in Empty state. Adding to the group now.
12:06:14.154 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e).
12:06:17.154 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_1 generation 1 with 1 members.
12:06:17.155 [virtual-737] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e', protocol='range'}
12:06:17.155 [virtual-737] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Finished assignment for group at generation 1: {consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e=Assignment(partitions=[t7_1-0])}
12:06:17.156 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e for group g7_1 for generation 1. The group has 1 members, 0 of which are static.
12:06:17.162 [virtual-737] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e', protocol='range'}
12:06:17.162 [virtual-737] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Notifying assignor about the new Assignment(partitions=[t7_1-0])
12:06:17.162 [virtual-737] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Adding newly assigned partitions: [t7_1-0]
12:06:17.163 [virtual-737] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Found no committed offset for partition t7_1-0
12:06:17.165 [virtual-737] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Resetting offset for partition t7_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
12:06:19.168 [virtual-737] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:06:19.168 [virtual-739] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$2(KafkaFlow.scala:33)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.flow.FlowCompanionOps$$anon$1.run(FlowCompanionOps.scala:29)
    at ox.flow.FlowOps$$anon$3.run(FlowOps.scala:56)
    at ox.flow.FlowOps.runLastToChannelAsync$$anonfun$1(FlowOps.scala:1021)
    at ox.flow.FlowOps.$anonfun$adapted$6(FlowOps.scala:1023)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.channels.forkPropagate$package$.forkPropagate$$anonfun$1(forkPropagate.scala:15)
    at ox.channels.forkPropagate$package$.$anonfun$adapted$1(forkPropagate.scala:16)
    at ox.fork$package$.forkUnsupervised$$anonfun$1(fork.scala:128)
    at ox.fork$package$.forkUnsupervised$$anonfun$adapted$1(fork.scala:129)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
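These two traces look like shutdown noise rather than a test failure: when the surrounding ox scope ends, the virtual thread driving the poll loop is interrupted, and the Kafka client surfaces that as org.apache.kafka.common.errors.InterruptException wrapping the InterruptedException. A rough sketch of the general pattern (a hypothetical helper, not ox's actual KafkaConsumerWrapper):

import java.time.Duration
import org.apache.kafka.clients.consumer.{ConsumerRecords, KafkaConsumer}
import org.apache.kafka.common.errors.InterruptException

// Hypothetical helper: poll until the thread is interrupted, then shut down cleanly.
def pollLoop[K, V](consumer: KafkaConsumer[K, V])(handle: ConsumerRecords[K, V] => Unit): Unit =
  try
    while !Thread.currentThread().isInterrupted do handle(consumer.poll(Duration.ofMillis(500)))
  catch
    case _: InterruptException =>
      // poll() was interrupted mid-call, as in the trace above; treat it as a shutdown signal.
      // The Kafka client re-asserts the interrupt flag when raising InterruptException, so
      // clear it here to let close() below complete without being interrupted again.
      Thread.interrupted()
  finally consumer.close()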
12:06:19.169 [virtual-744] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Revoke previously assigned partitions [t7_1-0]
12:06:19.169 [virtual-744] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Member consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:06:19.169 [virtual-744] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Resetting generation and member id due to: consumer pro-actively leaving the group
12:06:19.169 [virtual-744] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-15, groupId=g7_1] Request joining group due to: consumer pro-actively leaving the group
12:06:19.170 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_1] Member consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:06:19.170 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g7_1-15-d55bf522-fc32-4cd6-b168-f0f8b660fb0e) members.).
12:06:19.170 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_1 with generation 2 is now empty.
12:06:19.180 [virtual-744] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:19.181 [virtual-744] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:19.181 [virtual-744] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:19.181 [virtual-744] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:19.182 [virtual-744] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_1-15 unregistered
12:06:19.183 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
    acks = -1
    batch.size = 16384
    bootstrap.servers = [localhost:6001]
    buffer.memory = 33554432
    client.dns.lookup = use_all_dns_ips
    client.id = producer-24
    compression.gzip.level = -1
    compression.lz4.level = 9
    compression.type = none
    compression.zstd.level = 3
    connections.max.idle.ms = 540000
    delivery.timeout.ms = 120000
    enable.idempotence = true
    enable.metrics.push = true
    interceptor.classes = []
    key.serializer = class org.apache.kafka.common.serialization.StringSerializer
    linger.ms = 5
    max.block.ms = 10000
    max.in.flight.requests.per.connection = 5
    max.request.size = 1048576
    metadata.max.age.ms = 300000
    metadata.max.idle.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partitioner.adaptive.partitioning.enable = true
    partitioner.availability.timeout.ms = 0
    partitioner.class = null
    partitioner.ignore.keys = false
    receive.buffer.bytes = 32768
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retries = 2147483647
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 1000
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    transaction.timeout.ms = 60000
    transaction.two.phase.commit.enable = false
    transactional.id = null
    value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:06:19.183 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:19.183 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-24] Instantiated an idempotent producer.
12:06:19.185 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:19.185 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:19.185 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327979185
12:06:19.187 [kafka-producer-network-thread | producer-24] INFO o.a.k.c.Metadata - [Producer clientId=producer-24] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:19.187 [kafka-producer-network-thread | producer-24] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-24] ProducerId set to 23 with epoch 0
12:06:19.196 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-24] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:19.197 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:19.197 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:19.197 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:19.197 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:19.197 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-24 unregistered
12:06:19.199 [virtual-746] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g7_1-16
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g7_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

12:06:19.199 [virtual-746] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:19.201 [virtual-746] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:19.202 [virtual-746] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:19.202 [virtual-746] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327979201
12:06:19.202 [virtual-749] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Subscribed to topic(s): t7_1
12:06:19.204 [virtual-749] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:19.205 [virtual-749] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:06:19.206 [virtual-749] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] (Re-)joining group
12:06:19.207 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_1 in Empty state. Created a new member id consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e and requesting the member to rejoin with this id.
12:06:19.208 [virtual-749] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Request joining group due to: need to re-join with the given member-id: consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e
12:06:19.208 [virtual-749] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] (Re-)joining group
12:06:19.209 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e joins group g7_1 in Empty state. Adding to the group now.
12:06:19.209 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e).
12:06:22.209 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_1 generation 3 with 1 members.
12:06:22.209 [virtual-749] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e', protocol='range'}
12:06:22.210 [virtual-749] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Finished assignment for group at generation 3: {consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e=Assignment(partitions=[t7_1-0])}
12:06:22.210 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e for group g7_1 for generation 3. The group has 1 members, 0 of which are static.
12:06:22.217 [virtual-749] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e', protocol='range'}
12:06:22.217 [virtual-749] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Notifying assignor about the new Assignment(partitions=[t7_1-0])
12:06:22.217 [virtual-749] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Adding newly assigned partitions: [t7_1-0]
12:06:22.218 [virtual-749] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t7_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
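Note the contrast with the first subscription at 12:06:17.165 above: there no committed offset existed, so the position was reset to 0 under auto.offset.reset = earliest, while here the same group g7_1 resumes from committed offset 3. In sketch form, the explicit per-partition commit that produces such a stored position could look like this (a hypothetical helper against the plain Kafka client, not the test's code):

import org.apache.kafka.clients.consumer.{KafkaConsumer, OffsetAndMetadata}
import org.apache.kafka.common.TopicPartition

// Hypothetical helper: after processing records up to offset n - 1 of t7_1-0, store n
// (the next offset to read) for the group; a consumer that later joins the same group
// resumes from this position instead of applying auto.offset.reset.
def commitNextOffset(consumer: KafkaConsumer[String, String], nextOffset: Long): Unit =
  consumer.commitSync(java.util.Map.of(new TopicPartition("t7_1", 0), new OffsetAndMetadata(nextOffset)))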
12:06:22.221 [virtual-746] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
	allow.auto.create.topics = true
	auto.commit.interval.ms = 5000
	auto.offset.reset = earliest
	bootstrap.servers = [localhost:6001]
	check.crcs = true
	client.dns.lookup = use_all_dns_ips
	client.id = consumer-g7_2-17
	client.rack =
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = false
	enable.metrics.push = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = g7_2
	group.instance.id = null
	group.protocol = classic
	group.remote.assignor = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	internal.throw.on.fetch.stable.offset.unsupported = false
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	session.timeout.ms = 45000
	share.acknowledgement.mode = implicit
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

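(Editor's note: for readers reproducing this setup, the consumer whose configuration is dumped above corresponds roughly to the following code. This is a minimal sketch assuming the standard Kafka Java client API, setting only the non-default values visible in the dump; it is not taken from the ox sources.)

    import java.util.Properties
    import org.apache.kafka.clients.consumer.KafkaConsumer

    @main def consumerSketch(): Unit =
      val props = new Properties()
      props.put("bootstrap.servers", "localhost:6001")
      props.put("group.id", "g7_2")
      props.put("auto.offset.reset", "earliest") // read from the beginning of the topic
      props.put("enable.auto.commit", "false")   // offsets are committed explicitly
      props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
      props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
      val consumer = new KafkaConsumer[String, String](props)
      consumer.subscribe(java.util.List.of("t7_1")) // topic name as in the subscription logged below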
12:06:22.221 [virtual-746] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:22.223 [virtual-746] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:22.224 [virtual-746] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:22.224 [virtual-746] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327982223
12:06:22.224 [virtual-753] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Subscribed to topic(s): t7_1
12:06:22.228 [virtual-753] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:22.229 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:06:22.230 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] (Re-)joining group
12:06:22.232 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g7_2 in Empty state. Created a new member id consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5 and requesting the member to rejoin with this id.
12:06:22.232 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Request joining group due to: need to re-join with the given member-id: consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5
12:06:22.232 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] (Re-)joining group
12:06:22.232 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5 joins group g7_2 in Empty state. Adding to the group now.
12:06:22.233 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5 with group instance id null; client reason: need to re-join with the given member-id: consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5).
12:06:25.233 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g7_2 generation 1 with 1 members.
12:06:25.234 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5', protocol='range'}
12:06:25.234 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Finished assignment for group at generation 1: {consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5=Assignment(partitions=[t7_1-0])}
12:06:25.235 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5 for group g7_2 for generation 1. The group has 1 members, 0 of which are static.
12:06:25.241 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5', protocol='range'}
12:06:25.241 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Notifying assignor about the new Assignment(partitions=[t7_1-0])
12:06:25.241 [virtual-753] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Adding newly assigned partitions: [t7_1-0]
12:06:25.242 [virtual-753] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Found no committed offset for partition t7_1-0
12:06:25.244 [virtual-753] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Resetting offset for partition t7_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
12:06:25.245 [virtual-752] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.supervised$package$.$anonfun$2(supervised.scala:53)
	at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
	at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:06:25.245 [virtual-753] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:06:25.246 [virtual-748] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
	at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
	at ox.channels.ActorRef.ask(actor.scala:64)
	at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
	at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.supervised$package$.$anonfun$2(supervised.scala:53)
	at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
	at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:06:25.246 [virtual-755] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Revoke previously assigned partitions [t7_1-0]
12:06:25.246 [virtual-755] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Member consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:06:25.246 [virtual-755] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Resetting generation and member id due to: consumer pro-actively leaving the group
12:06:25.246 [virtual-755] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_2-17, groupId=g7_2] Request joining group due to: consumer pro-actively leaving the group
12:06:25.247 [virtual-749] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
	... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
	at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
	at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
	at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
	at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
	at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
	at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
	at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
	at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
	at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
	at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
	at scala.Function0.apply$mcV$sp(Function0.scala:45)
	at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
	at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
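(Editor's note: the InterruptedException / InterruptException pairs above are the expected shutdown path rather than broker failures. Judging by the ox.supervised and ox.fork frames, each poll loop runs on a virtual thread inside a supervised scope, and ending the scope interrupts forks still blocked in KafkaConsumer.poll. A minimal sketch of that mechanism, assuming only ox's public supervised/fork API and using a sleep in place of the actual poll call:)

    import ox.*

    @main def scopeShutdownSketch(): Unit =
      supervised {
        fork { // cancelled by interruption once the scope's main body completes
          try Thread.sleep(10000) // stands in for KafkaConsumer.poll(...)
          catch
            case e: InterruptedException =>
              println(s"poll interrupted during scope shutdown: $e")
              throw e
        }
        // returning from this block ends the scope and interrupts the fork above
      }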
12:06:25.247 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_2] Member consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:06:25.247 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g7_2-17-442b994f-9bfd-4020-8039-62775067cea5) members.).
12:06:25.247 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_2 with generation 2 is now empty.
12:06:25.248 [virtual-756] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Revoke previously assigned partitions [t7_1-0]
12:06:25.248 [virtual-756] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Member consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:06:25.248 [virtual-756] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Resetting generation and member id due to: consumer pro-actively leaving the group
12:06:25.248 [virtual-756] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g7_1-16, groupId=g7_1] Request joining group due to: consumer pro-actively leaving the group
12:06:25.248 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g7_1] Member consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:06:25.248 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g7_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g7_1-16-b517efe5-9ad5-4c76-912a-90f1a942946e) members.).
12:06:25.248 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g7_1 with generation 4 is now empty.
12:06:25.732 [virtual-756] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:25.732 [virtual-756] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:25.732 [virtual-756] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:25.733 [virtual-756] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:25.734 [virtual-756] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_1-16 unregistered
12:06:25.746 [virtual-755] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:25.746 [virtual-755] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:25.746 [virtual-755] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:25.746 [virtual-755] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:25.748 [virtual-755] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g7_2-17 unregistered
12:06:25.749 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-25
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

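(Editor's note: the producer configurations in this test, producer-25 through producer-29 below, differ only in client.id. The settings of note are enable.idempotence = true, which accounts for the "Instantiated an idempotent producer" lines and the effectively unbounded retries = 2147483647, and max.block.ms = 10000, which bounds how long send() may wait for metadata such as the t8_1 auto-creation seen below. A rough code equivalent, again a sketch against the standard client API rather than the ox sources:)

    import java.util.Properties
    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

    @main def producerSketch(): Unit =
      val props = new Properties()
      props.put("bootstrap.servers", "localhost:6001")
      props.put("enable.idempotence", "true") // implies acks=all and retries=Int.MaxValue
      props.put("max.block.ms", "10000")      // cap metadata/buffer waits at 10 s
      props.put("linger.ms", "5")
      props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
      props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
      val producer = new KafkaProducer[String, String](props)
      try producer.send(new ProducerRecord("t8_1", "k", "v")).get() // topic as in the auto-creation log below
      finally producer.close()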
12:06:25.749 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:25.750 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-25] Instantiated an idempotent producer.
12:06:25.751 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:25.751 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:25.751 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327985751
12:06:25.753 [data-plane-kafka-request-handler-6] INFO k.s.DefaultAutoTopicCreationManager - Sent auto-creation request for Set(t8_1) to the active controller.
12:06:25.753 [kafka-producer-network-thread | producer-25] WARN o.a.k.c.NetworkClient - [Producer clientId=producer-25] The metadata response from the cluster reported a recoverable issue with correlation id 1 : {t8_1=UNKNOWN_TOPIC_OR_PARTITION}
12:06:25.753 [kafka-producer-network-thread | producer-25] INFO o.a.k.c.Metadata - [Producer clientId=producer-25] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:25.753 [kafka-producer-network-thread | producer-25] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-25] ProducerId set to 24 with epoch 0
12:06:25.754 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] CreateTopics result(s): CreatableTopic(name='t8_1', numPartitions=1, replicationFactor=1, assignments=[], configs=[]): SUCCESS
12:06:25.754 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed TopicRecord for topic t8_1 with topic ID gqN0uggpTLuUIFTGU7mnNg.
12:06:25.755 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] Replayed PartitionRecord for new partition t8_1-0 with topic ID gqN0uggpTLuUIFTGU7mnNg and PartitionRegistration(replicas=[0], directories=[1IEk1f33dz_GsvBzHQUJSQ], isr=[0], removingReplicas=[], addingReplicas=[], elr=[], lastKnownElr=[], leader=0, leaderRecoveryState=RECOVERED, leaderEpoch=0, partitionEpoch=0).
12:06:25.780 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 1 partition(s) to local leaders.
12:06:25.780 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(t8_1-0)
12:06:25.781 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Creating new partition t8_1-0 with topic id gqN0uggpTLuUIFTGU7mnNg.
12:06:25.783 [kafka-0-metadata-loader-event-handler] INFO o.a.k.s.i.l.UnifiedLog - [LogLoader partition=t8_1-0, dir=/tmp/kafka-logs4345019044203235659] Loading producer state till offset 0
12:06:25.783 [kafka-0-metadata-loader-event-handler] INFO k.l.LogManager - Created log for partition t8_1-0 in /tmp/kafka-logs4345019044203235659/t8_1-0 with properties {}
12:06:25.783 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t8_1-0 broker=0] No checkpointed highwatermark is found for partition t8_1-0
12:06:25.783 [kafka-0-metadata-loader-event-handler] INFO k.c.Partition - [Partition t8_1-0 broker=0] Log loaded for partition t8_1-0 with initial high watermark 0
12:06:25.784 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Leader t8_1-0 with topic id Some(gqN0uggpTLuUIFTGU7mnNg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1.
12:06:26.764 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-25] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:26.765 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:26.765 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:26.765 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:26.765 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:26.765 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-25 unregistered
12:06:26.766 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-26
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:06:26.766 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:26.766 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-26] Instantiated an idempotent producer.
12:06:26.768 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:26.768 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:26.768 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327986768
12:06:26.770 [kafka-producer-network-thread | producer-26] INFO o.a.k.c.Metadata - [Producer clientId=producer-26] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:26.770 [kafka-producer-network-thread | producer-26] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-26] ProducerId set to 25 with epoch 0
12:06:26.778 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-26] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:26.779 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:26.779 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:26.779 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:26.779 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:26.779 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-26 unregistered
12:06:26.779 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-27
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:06:26.779 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:26.780 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-27] Instantiated an idempotent producer.
12:06:26.781 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:26.781 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:26.781 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327986781
12:06:26.784 [kafka-producer-network-thread | producer-27] INFO o.a.k.c.Metadata - [Producer clientId=producer-27] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:26.784 [kafka-producer-network-thread | producer-27] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-27] ProducerId set to 26 with epoch 0
12:06:26.792 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-27] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:26.793 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:26.793 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:26.793 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:26.794 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:26.794 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-27 unregistered
12:06:26.794 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-28
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:06:26.794 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:26.795 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-28] Instantiated an idempotent producer.
12:06:26.797 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:26.797 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:26.797 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327986797
12:06:26.798 [kafka-producer-network-thread | producer-28] INFO o.a.k.c.Metadata - [Producer clientId=producer-28] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:26.798 [kafka-producer-network-thread | producer-28] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-28] ProducerId set to 27 with epoch 0
12:06:26.806 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-28] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:26.807 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:26.807 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:26.807 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:26.807 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:26.808 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-28 unregistered
12:06:26.808 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.c.AbstractConfig - ProducerConfig values:
	acks = -1
	batch.size = 16384
	bootstrap.servers = [localhost:6001]
	buffer.memory = 33554432
	client.dns.lookup = use_all_dns_ips
	client.id = producer-29
	compression.gzip.level = -1
	compression.lz4.level = 9
	compression.type = none
	compression.zstd.level = 3
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = true
	enable.metrics.push = true
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 5
	max.block.ms = 10000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metadata.max.idle.ms = 300000
	metadata.recovery.rebootstrap.trigger.ms = 300000
	metadata.recovery.strategy = rebootstrap
	metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.adaptive.partitioning.enable = true
	partitioner.availability.timeout.ms = 0
	partitioner.class = null
	partitioner.ignore.keys = false
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 2147483647
	retry.backoff.max.ms = 1000
	retry.backoff.ms = 1000
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.connect.timeout.ms = null
	sasl.login.read.timeout.ms = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.login.retry.backoff.max.ms = 10000
	sasl.login.retry.backoff.ms = 100
	sasl.mechanism = GSSAPI
	sasl.oauthbearer.assertion.algorithm = RS256
	sasl.oauthbearer.assertion.claim.aud = null
	sasl.oauthbearer.assertion.claim.exp.seconds = 300
	sasl.oauthbearer.assertion.claim.iss = null
	sasl.oauthbearer.assertion.claim.jti.include = false
	sasl.oauthbearer.assertion.claim.nbf.seconds = 60
	sasl.oauthbearer.assertion.claim.sub = null
	sasl.oauthbearer.assertion.file = null
	sasl.oauthbearer.assertion.private.key.file = null
	sasl.oauthbearer.assertion.private.key.passphrase = null
	sasl.oauthbearer.assertion.template.file = null
	sasl.oauthbearer.client.credentials.client.id = null
	sasl.oauthbearer.client.credentials.client.secret = null
	sasl.oauthbearer.clock.skew.seconds = 30
	sasl.oauthbearer.expected.audience = null
	sasl.oauthbearer.expected.issuer = null
	sasl.oauthbearer.header.urlencode = false
	sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
	sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
	sasl.oauthbearer.jwks.endpoint.url = null
	sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
	sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
	sasl.oauthbearer.scope = null
	sasl.oauthbearer.scope.claim.name = scope
	sasl.oauthbearer.sub.claim.name = sub
	sasl.oauthbearer.token.endpoint.url = null
	security.protocol = PLAINTEXT
	security.providers = null
	send.buffer.bytes = 131072
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transaction.two.phase.commit.enable = false
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

12:06:26.809 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:26.809 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-29] Instantiated an idempotent producer.
12:06:26.811 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:26.811 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:26.811 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327986811
12:06:26.813 [kafka-producer-network-thread | producer-29] INFO o.a.k.c.Metadata - [Producer clientId=producer-29] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:26.813 [kafka-producer-network-thread | producer-29] INFO o.a.k.c.p.i.TransactionManager - [Producer clientId=producer-29] ProducerId set to 28 with epoch 0
12:06:26.822 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.p.KafkaProducer - [Producer clientId=producer-29] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
12:06:26.823 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:26.823 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:26.823 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:26.823 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:26.823 [pool-67-thread-1-ScalaTest-running-KafkaTest] INFO o.a.k.c.u.AppInfoParser - App info kafka.producer for producer-29 unregistered
12:06:26.824 [virtual-762] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_1-18
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

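Note: the only non-default settings in the ConsumerConfig dump above are the broker address, group id, manual commits, earliest offset reset, and the String deserializers. A minimal sketch building an equivalent consumer with plain kafka-clients, using only values visible in this log:

    import java.util.Properties
    import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
    import org.apache.kafka.common.serialization.StringDeserializer

    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:6001")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "g8_1")
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")   // offsets committed explicitly
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") // start from 0 when no offset exists
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)

    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(java.util.List.of("t8_1"))
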
12:06:26.824 [virtual-762] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:26.826 [virtual-762] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:26.826 [virtual-762] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:26.826 [virtual-762] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327986826
12:06:26.828 [virtual-763] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Subscribed to topic(s): t8_1
12:06:26.830 [virtual-763] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:26.831 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:06:26.831 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] (Re-)joining group
12:06:26.833 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_1 in Empty state. Created a new member id consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7 and requesting the member to rejoin with this id.
12:06:26.833 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Request joining group due to: need to re-join with the given member-id: consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7
12:06:26.833 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] (Re-)joining group
12:06:26.834 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7 joins group g8_1 in Empty state. Adding to the group now.
12:06:26.834 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7 with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7).
12:06:29.834 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_1 generation 1 with 1 members.
12:06:29.834 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7', protocol='range'}
12:06:29.835 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Finished assignment for group at generation 1: {consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7=Assignment(partitions=[t8_1-0])}
12:06:29.835 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7 for group g8_1 for generation 1. The group has 1 members, 0 of which are static.
12:06:29.841 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7', protocol='range'}
12:06:29.842 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Notifying assignor about the new Assignment(partitions=[t8_1-0])
12:06:29.842 [virtual-763] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Adding newly assigned partitions: [t8_1-0]
12:06:29.842 [virtual-763] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Found no committed offset for partition t8_1-0
12:06:29.845 [virtual-763] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Resetting offset for partition t8_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
12:06:29.846 [virtual-765] ERROR o.k.KafkaFlow$ - Exception when polling for records
ox.flow.FlowOps$$anon$1: abort take
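
Note: the ERROR above is not a test failure. "abort take" is the control-flow exception ox's Flow.take uses internally to stop upstream stages; here it interrupts KafkaFlow's polling actor once the expected number of records has been received. A sketch of the pattern that produces it, assuming the ConsumerSettings builder API from the ox-kafka docs (group id, port and topic taken from this log):

    import ox.kafka.{ConsumerSettings, KafkaFlow}
    import ox.kafka.ConsumerSettings.AutoOffsetReset

    val settings = ConsumerSettings
      .default("g8_1")
      .bootstrapServers("localhost:6001")
      .autoOffsetReset(AutoOffsetReset.Earliest)

    // take(n) completes the flow after n records; the still-running polling
    // fork is then aborted, which surfaces in the log as "abort take"
    val first3 = KafkaFlow.subscribe(settings, "t8_1").take(3).runToList()
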
12:06:29.856 [virtual-770] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Revoke previously assigned partitions [t8_1-0]
12:06:29.856 [virtual-770] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Member consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:06:29.856 [virtual-770] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Resetting generation and member id due to: consumer pro-actively leaving the group
12:06:29.856 [virtual-770] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-18, groupId=g8_1] Request joining group due to: consumer pro-actively leaving the group
12:06:29.857 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_1] Member consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:06:29.857 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g8_1-18-7bee5e7b-1a5b-4198-bf65-197755b929d7) members.).
12:06:29.857 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_1 with generation 2 is now empty.
12:06:30.348 [virtual-770] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:30.348 [virtual-770] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:30.348 [virtual-770] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:30.348 [virtual-770] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:30.349 [virtual-770] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_1-18 unregistered
12:06:30.350 [virtual-771] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_1-19
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_1
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

12:06:30.350 [virtual-771] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:30.352 [virtual-771] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:30.352 [virtual-771] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:30.352 [virtual-771] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327990352
12:06:30.352 [virtual-774] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Subscribed to topic(s): t8_1
12:06:30.354 [virtual-774] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:30.355 [virtual-774] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:06:30.355 [virtual-774] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] (Re-)joining group
12:06:30.357 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_1 in Empty state. Created a new member id consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996 and requesting the member to rejoin with this id.
12:06:30.357 [virtual-774] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Request joining group due to: need to re-join with the given member-id: consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996
12:06:30.357 [virtual-774] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] (Re-)joining group
12:06:30.357 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996 joins group g8_1 in Empty state. Adding to the group now.
12:06:30.357 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 2 (reason: Adding new member consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996 with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996).
12:06:33.358 [group-coordinator-event-processor-0] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_1 generation 3 with 1 members.
12:06:33.359 [virtual-774] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Successfully joined group with generation Generation{generationId=3, memberId='consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996', protocol='range'}
12:06:33.359 [virtual-774] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Finished assignment for group at generation 3: {consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996=Assignment(partitions=[t8_1-0])}
12:06:33.360 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996 for group g8_1 for generation 3. The group has 1 members, 0 of which are static.
12:06:33.366 [virtual-774] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Successfully synced group in generation Generation{generationId=3, memberId='consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996', protocol='range'}
12:06:33.367 [virtual-774] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Notifying assignor about the new Assignment(partitions=[t8_1-0])
12:06:33.367 [virtual-774] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Adding newly assigned partitions: [t8_1-0]
12:06:33.368 [virtual-774] INFO o.a.k.c.c.i.ConsumerUtils - Setting offset for partition t8_1-0 to the committed offset FetchPosition{offset=3, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}
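
Note: unlike the first consumer in group g8_1, which found no committed offset and reset to 0, this second consumer resumes from the committed offset 3, so the first one must have committed its position before closing. With enable.auto.commit=false that happens via an explicit commit; a minimal sketch continuing the consumer sketch above (plain kafka-clients, processing stand-in only):

    import java.time.Duration
    import org.apache.kafka.clients.consumer.KafkaConsumer
    import scala.jdk.CollectionConverters.*

    // poll once and commit; a consumer that later joins the same group ("g8_1")
    // then starts from the committed offset instead of resetting to earliest
    def pollAndCommit(consumer: KafkaConsumer[String, String]): Unit =
      val records = consumer.poll(Duration.ofMillis(500))
      records.asScala.foreach(r => println(s"${r.offset}: ${r.value}"))
      consumer.commitSync() // with enable.auto.commit=false this persists the position
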
12:06:33.371 [virtual-771] INFO o.a.k.c.c.AbstractConfig - ConsumerConfig values:
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = earliest
    bootstrap.servers = [localhost:6001]
    check.crcs = true
    client.dns.lookup = use_all_dns_ips
    client.id = consumer-g8_2-20
    client.rack =
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    enable.metrics.push = true
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 500
    fetch.min.bytes = 1
    group.id = g8_2
    group.instance.id = null
    group.protocol = classic
    group.remote.assignor = null
    heartbeat.interval.ms = 3000
    interceptor.classes = []
    internal.leave.group.on.close = true
    internal.throw.on.fetch.stable.offset.unsupported = false
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 500
    metadata.max.age.ms = 300000
    metadata.recovery.rebootstrap.trigger.ms = 300000
    metadata.recovery.strategy = rebootstrap
    metric.reporters = [org.apache.kafka.common.metrics.JmxReporter]
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
    receive.buffer.bytes = 65536
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 30000
    retry.backoff.max.ms = 1000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.connect.timeout.ms = null
    sasl.login.read.timeout.ms = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.login.retry.backoff.max.ms = 10000
    sasl.login.retry.backoff.ms = 100
    sasl.mechanism = GSSAPI
    sasl.oauthbearer.assertion.algorithm = RS256
    sasl.oauthbearer.assertion.claim.aud = null
    sasl.oauthbearer.assertion.claim.exp.seconds = 300
    sasl.oauthbearer.assertion.claim.iss = null
    sasl.oauthbearer.assertion.claim.jti.include = false
    sasl.oauthbearer.assertion.claim.nbf.seconds = 60
    sasl.oauthbearer.assertion.claim.sub = null
    sasl.oauthbearer.assertion.file = null
    sasl.oauthbearer.assertion.private.key.file = null
    sasl.oauthbearer.assertion.private.key.passphrase = null
    sasl.oauthbearer.assertion.template.file = null
    sasl.oauthbearer.client.credentials.client.id = null
    sasl.oauthbearer.client.credentials.client.secret = null
    sasl.oauthbearer.clock.skew.seconds = 30
    sasl.oauthbearer.expected.audience = null
    sasl.oauthbearer.expected.issuer = null
    sasl.oauthbearer.header.urlencode = false
    sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
    sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
    sasl.oauthbearer.jwks.endpoint.url = null
    sasl.oauthbearer.jwt.retriever.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
    sasl.oauthbearer.jwt.validator.class = class org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
    sasl.oauthbearer.scope = null
    sasl.oauthbearer.scope.claim.name = scope
    sasl.oauthbearer.sub.claim.name = sub
    sasl.oauthbearer.token.endpoint.url = null
    security.protocol = PLAINTEXT
    security.providers = null
    send.buffer.bytes = 131072
    session.timeout.ms = 45000
    share.acknowledgement.mode = implicit
    socket.connection.setup.timeout.max.ms = 30000
    socket.connection.setup.timeout.ms = 10000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
    ssl.endpoint.identification.algorithm = https
    ssl.engine.factory.class = null
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.certificate.chain = null
    ssl.keystore.key = null
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLSv1.3
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.certificates = null
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

12:06:33.372 [virtual-771] INFO o.a.k.c.t.i.KafkaMetricsCollector - initializing Kafka metrics collector
12:06:33.374 [virtual-771] INFO o.a.k.c.u.AppInfoParser - Kafka version: 4.1.1
12:06:33.374 [virtual-771] INFO o.a.k.c.u.AppInfoParser - Kafka commitId: be816b82d25370ce
12:06:33.374 [virtual-771] INFO o.a.k.c.u.AppInfoParser - Kafka startTimeMs: 1764327993374
12:06:33.374 [virtual-778] INFO o.a.k.c.c.i.ClassicKafkaConsumer - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Subscribed to topic(s): t8_1
12:06:33.376 [virtual-778] INFO o.a.k.c.Metadata - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Cluster ID: cERjULLDRBGv7lPJWPu8sA
12:06:33.377 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Discovered group coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false)
12:06:33.378 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] (Re-)joining group
12:06:33.379 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Dynamic member with unknown member id joins group g8_2 in Empty state. Created a new member id consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466 and requesting the member to rejoin with this id.
12:06:33.380 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Request joining group due to: need to re-join with the given member-id: consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466
12:06:33.380 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] (Re-)joining group
12:06:33.380 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Pending dynamic member with id consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466 joins group g8_2 in Empty state. Adding to the group now.
12:06:33.381 [group-coordinator-event-processor-2] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_2 in state PreparingRebalance with old generation 0 (reason: Adding new member consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466 with group instance id null; client reason: need to re-join with the given member-id: consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466).
12:06:36.381 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Stabilized group g8_2 generation 1 with 1 members.
12:06:36.382 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Successfully joined group with generation Generation{generationId=1, memberId='consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466', protocol='range'}
12:06:36.382 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Finished assignment for group at generation 1: {consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466=Assignment(partitions=[t8_1-0])}
12:06:36.382 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Assignment received from leader consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466 for group g8_2 for generation 1. The group has 1 members, 0 of which are static.
12:06:36.389 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Successfully synced group in generation Generation{generationId=1, memberId='consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466', protocol='range'}
12:06:36.389 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Notifying assignor about the new Assignment(partitions=[t8_1-0])
12:06:36.389 [virtual-778] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Adding newly assigned partitions: [t8_1-0]
12:06:36.390 [virtual-778] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Found no committed offset for partition t8_1-0
12:06:36.392 [virtual-778] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Resetting offset for partition t8_1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[localhost:6001 (id: 0 rack: null isFenced: false)], epoch=0}}.
12:06:36.393 [virtual-777] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:06:36.393 [virtual-774] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:06:36.393 [virtual-778] ERROR o.k.KafkaConsumerWrapper$ - Exception when polling for records in Kafka
java.lang.InterruptedException: null
    ... 18 common frames omitted
Wrapped by: org.apache.kafka.common.errors.InterruptException: java.lang.InterruptedException
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.maybeThrowInterruptException(ConsumerNetworkClient.java:537)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:298)
    at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:253)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.pollForFetches(ClassicKafkaConsumer.java:715)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:646)
    at org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.poll(ClassicKafkaConsumer.java:625)
    at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:895)
    at ox.kafka.KafkaConsumerWrapper$$anon$1.poll(KafkaConsumerWrapper.scala:32)
    at ox.kafka.KafkaFlow$.$anonfun$1(KafkaFlow.scala:40)
    at ox.channels.ActorRef.ask$$anonfun$1(actor.scala:54)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.channels.Actor$.create$$anonfun$1(actor.scala:30)
    at ox.fork$package$.forkError$$anonfun$1(fork.scala:46)
    at ox.fork$package$.forkError$$anonfun$adapted$1(fork.scala:60)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
12:06:36.393 [virtual-773] ERROR o.k.KafkaFlow$ - Exception when polling for records
java.lang.InterruptedException: null
    at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:386)
    at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073)
    at ox.channels.ActorRef.f$proxy4$1(actor.scala:64)
    at ox.channels.ActorRef.ask(actor.scala:64)
    at ox.kafka.KafkaFlow$.doSubscribe(KafkaFlow.scala:40)
    at ox.kafka.KafkaFlow$.subscribe$$anonfun$1$$anonfun$1(KafkaFlow.scala:25)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:15)
    at scala.runtime.function.JProcedure1.apply(JProcedure1.java:10)
    at ox.supervised$package$.$anonfun$2(supervised.scala:53)
    at ox.fork$package$.forkUserError$$anonfun$1(fork.scala:96)
    at ox.fork$package$.forkUserError$$anonfun$adapted$1(fork.scala:107)
    at scala.Function0.apply$mcV$sp(Function0.scala:45)
    at ox.internal.ThreadHerd.$anonfun$1(ThreadHerd.scala:29)
    at java.base/java.lang.VirtualThread.run(VirtualThread.java:311)
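
Note: these ERRORs are the expected shutdown path rather than failures. When the test scope ends, ox interrupts its virtual threads, and KafkaConsumer.poll rethrows the thread interrupt as Kafka's unchecked org.apache.kafka.common.errors.InterruptException (the "Wrapped by" frames above). A minimal sketch of a poll loop that treats it as a clean stop, reusing the consumer from the earlier sketch (plain kafka-clients; the println is a stand-in for real processing):

    import java.time.Duration
    import org.apache.kafka.clients.consumer.KafkaConsumer
    import org.apache.kafka.common.errors.InterruptException

    def pollUntilInterrupted(consumer: KafkaConsumer[String, String]): Unit =
      try
        while true do
          val records = consumer.poll(Duration.ofMillis(500))
          records.forEach(r => println(s"${r.offset}: ${r.value}"))
      catch
        case _: InterruptException =>
          // thrown by poll() when the polling thread is interrupted;
          // the client restores the interrupt flag, so just fall through
          ()
      finally consumer.close()
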
12:06:36.394 [virtual-780] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Revoke previously assigned partitions [t8_1-0]
12:06:36.394 [virtual-780] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Member consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:06:36.394 [virtual-781] INFO o.a.k.c.c.i.ConsumerRebalanceListenerInvoker - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Revoke previously assigned partitions [t8_1-0]
12:06:36.394 [virtual-781] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Member consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466 sending LeaveGroup request to coordinator localhost:6001 (id: 2147483647 rack: null isFenced: false) due to the consumer is being closed
12:06:36.394 [virtual-780] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Resetting generation and member id due to: consumer pro-actively leaving the group
12:06:36.394 [virtual-780] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_1-19, groupId=g8_1] Request joining group due to: consumer pro-actively leaving the group
12:06:36.394 [virtual-781] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Resetting generation and member id due to: consumer pro-actively leaving the group
12:06:36.394 [virtual-781] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-g8_2-20, groupId=g8_2] Request joining group due to: consumer pro-actively leaving the group
12:06:36.394 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_2] Member consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:06:36.394 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_2 in state PreparingRebalance with old generation 1 (reason: explicit `LeaveGroup` request for (consumer-g8_2-20-bf31842e-7cfd-4ba8-81e7-e40a58f1e466) members.).
12:06:36.394 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_2 with generation 2 is now empty.
12:06:36.395 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [Group g8_1] Member consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996 has left group through explicit `LeaveGroup` request; client reason: the consumer is being closed
12:06:36.395 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Preparing to rebalance group g8_1 in state PreparingRebalance with old generation 3 (reason: explicit `LeaveGroup` request for (consumer-g8_1-19-b8a11db0-87ab-4dab-b4d4-b936ebd06996) members.).
12:06:36.395 [group-coordinator-event-processor-1] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] Group g8_1 with generation 4 is now empty.
12:06:36.880 [virtual-780] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:36.880 [virtual-780] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:36.881 [virtual-780] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:36.881 [virtual-780] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:36.882 [virtual-780] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_1-19 unregistered
12:06:36.893 [virtual-781] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
12:06:36.893 [virtual-781] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
12:06:36.893 [virtual-781] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter
12:06:36.893 [virtual-781] INFO o.a.k.c.m.Metrics - Metrics reporters closed
12:06:36.895 [virtual-781] INFO o.a.k.c.u.AppInfoParser - App info kafka.consumer for consumer-g8_2-20 unregistered
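
Note: what follows is the controlled shutdown of the broker the tests ran against. The `embedded-kafka-spec` group id seen later in the shutdown and the default port 6001 suggest the suite uses the embedded-kafka library; a sketch of the lifecycle that brackets this log, assuming io.github.embeddedkafka's API (the single-node KRaft broker matches the quorum-controller log lines below):

    import io.github.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}

    implicit val config: EmbeddedKafkaConfig = EmbeddedKafkaConfig(kafkaPort = 6001)

    EmbeddedKafka.start()        // boots the in-process broker the clients above connected to
    try ()                       // ... run the Kafka tests against localhost:6001 ...
    finally EmbeddedKafka.stop() // triggers the controlled shutdown logged below
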
12:06:36.897 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from STARTED to SHUTTING_DOWN
12:06:36.898 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] shutting down
12:06:36.899 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Beginning controlled shutdown.
12:06:36.899 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] Unfenced broker 0 has requested and been granted a controlled shutdown.
12:06:36.903 [quorum-controller-0-event-handler] INFO o.a.k.c.ReplicationControlManager - [QuorumController id=0] enterControlledShutdown[0]: changing 11 partition(s)
12:06:36.903 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=0, inControlledShutdown=1, logDirs=[])
12:06:36.929 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The broker is in PENDING_CONTROLLED_SHUTDOWN state, still waiting for the active controller.
12:06:36.929 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Transitioning 11 partition(s) to local followers.
12:06:36.935 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t6_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.935 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t5_2-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.935 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t5_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.935 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t4-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.936 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower __consumer_offsets-0 starts at leader epoch 1 from offset 1056 with partition epoch 1 and high watermark 1056. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.936 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t8_1-0 starts at leader epoch 1 from offset 5 with partition epoch 1 and high watermark 5. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.936 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t7_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.936 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t3_2-0 starts at leader epoch 1 from offset 3 with partition epoch 1 and high watermark 3. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.936 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t3_1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.936 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t1-0 starts at leader epoch 1 from offset 4 with partition epoch 1 and high watermark 4. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.936 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Follower t2-0 starts at leader epoch 1 from offset 1000 with partition epoch 1 and high watermark 1000. Current leader is -1. Previous leader Some(-1) and previous leader epoch was 1.
12:06:36.938 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] Removed fetcher for partitions HashSet(t2-0, t6_1-0, t3_1-0, t3_2-0, t8_1-0, t5_2-0, t1-0, __consumer_offsets-0, t5_1-0, t7_1-0, t4-0)
12:06:36.938 [kafka-0-metadata-loader-event-handler] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] Removed fetcher for partitions HashSet(t2-0, t6_1-0, t3_1-0, t3_2-0, t8_1-0, t5_2-0, t1-0, __consumer_offsets-0, t5_1-0, t7_1-0, t4-0)
12:06:36.939 [kafka-0-metadata-loader-event-handler] INFO s.c.logger - [Broker id=0] Stopped fetchers as part of controlled shutdown for 11 partitions
12:06:36.940 [kafka-0-metadata-loader-event-handler] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Scheduling unloading of metadata for __consumer_offsets-0 with epoch OptionalInt[1]
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Started unloading metadata for __consumer_offsets-0 with epoch OptionalInt[1].
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g8_1] Unloading group metadata for generation 4.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g3_2] Unloading group metadata for generation 2.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g7_1] Unloading group metadata for generation 4.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g6_1] Unloading group metadata for generation 4.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g5_1] Unloading group metadata for generation 4.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g3_1] Unloading group metadata for generation 4.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g1] Unloading group metadata for generation 2.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g8_2] Unloading group metadata for generation 2.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g7_2] Unloading group metadata for generation 2.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g6_2] Unloading group metadata for generation 2.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=embedded-kafka-spec] Unloading group metadata for generation 4.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.g.GroupMetadataManager - [GroupCoordinator id=0 topic=__consumer_offsets partition=0] [GroupId=g5_2] Unloading group metadata for generation 2.
12:06:36.940 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Finished unloading metadata for __consumer_offsets-0 with epoch OptionalInt[1].
12:06:36.980 [quorum-controller-0-event-handler] INFO o.a.k.c.BrokerHeartbeatManager - [QuorumController id=0] The request from broker 0 to shut down has been granted since the lowest active offset 9223372036854775807 is now greater than the broker's controlled shutdown offset 217.
12:06:36.981 [quorum-controller-0-event-handler] INFO o.a.k.c.ClusterControlManager - [QuorumController id=0] Replayed BrokerRegistrationChangeRecord modifying the registration for broker 0: BrokerRegistrationChangeRecord(brokerId=0, brokerEpoch=5, fenced=1, inControlledShutdown=0, logDirs=[])
12:06:37.007 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] The controller has asked us to exit controlled shutdown.
12:06:37.007 [broker-0-lifecycle-manager-event-handler] INFO o.a.k.q.KafkaEventQueue - [BrokerLifecycleManager id=0] beginShutdown: shutting down event queue.
12:06:37.008 [broker-0-lifecycle-manager-event-handler] INFO k.s.BrokerLifecycleManager - [BrokerLifecycleManager id=0] Transitioning from PENDING_CONTROLLED_SHUTDOWN to SHUTTING_DOWN.
12:06:37.008 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Shutting down
12:06:37.008 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Stopping socket server request processors
12:06:37.009 [broker-0-to-controller-heartbeat-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Stopped
12:06:37.010 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-heartbeat-channel-manager]: Shutdown completed
12:06:37.012 [broker-0-lifecycle-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for heartbeat shutdown
12:06:37.012 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Stopped socket server request processors
12:06:37.013 [pool-67-thread-1] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Broker 0] shutting down
12:06:37.014 [pool-67-thread-1] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Broker 0] shut down completely
12:06:37.014 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutting down
12:06:37.015 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Stopped
12:06:37.015 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutdown completed
12:06:37.016 [pool-67-thread-1] INFO k.s.KafkaApis - [KafkaApi-0] Shutdown complete.
12:06:37.018 [pool-67-thread-1] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Shutting down.
12:06:37.018 [pool-67-thread-1] INFO k.c.t.TransactionStateManager - [Transaction State Manager 0]: Shutdown complete
12:06:37.018 [pool-67-thread-1] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Shutting down
12:06:37.019 [TxnMarkerSenderThread-0] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Stopped
12:06:37.019 [pool-67-thread-1] INFO k.c.t.TransactionMarkerChannelManager - [TxnMarkerSenderThread-0]: Shutdown completed
12:06:37.020 [pool-67-thread-1] INFO k.c.t.TransactionCoordinator - [TransactionCoordinator id=0] Shutdown complete.
12:06:37.021 [pool-67-thread-1] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Shutting down.
12:06:37.021 [pool-67-thread-1] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Closing coordinator runtime.
12:06:37.021 [pool-67-thread-1] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Shutting down
12:06:37.022 [group-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Stopped
12:06:37.022 [pool-67-thread-1] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [group-coordinator-reaper]: Shutdown completed
12:06:37.022 [pool-67-thread-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [GroupCoordinator id=0] Shutting down event processor.
12:06:37.022 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Shutting down. Draining the remaining events.
12:06:37.022 [group-coordinator-event-processor-2] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-2]: Shutdown completed
12:06:37.023 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Shutting down. Draining the remaining events.
12:06:37.023 [group-coordinator-event-processor-3] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-3]: Shutdown completed
12:06:37.023 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Shutting down. Draining the remaining events.
12:06:37.023 [group-coordinator-event-processor-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-1]: Shutdown completed
12:06:37.024 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Shutting down. Draining the remaining events.
12:06:37.024 [group-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [group-coordinator-event-processor-0]: Shutdown completed
12:06:37.024 [pool-67-thread-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [GroupCoordinator id=0] Event processor closed.
12:06:37.025 [pool-67-thread-1] INFO o.a.k.c.c.r.CoordinatorRuntime - [GroupCoordinator id=0] Coordinator runtime closed.
12:06:37.026 [pool-67-thread-1] INFO o.a.k.c.g.GroupCoordinatorService - [GroupCoordinator id=0] Shutdown complete.
12:06:37.026 [pool-67-thread-1] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Shutting down.
12:06:37.026 [pool-67-thread-1] INFO o.a.k.c.c.r.CoordinatorRuntime - [ShareCoordinator id=0] Closing coordinator runtime.
12:06:37.026 [pool-67-thread-1] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Shutting down
12:06:37.027 [share-coordinator-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Stopped
12:06:37.027 [pool-67-thread-1] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-coordinator-reaper]: Shutdown completed
12:06:37.027 [pool-67-thread-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [ShareCoordinator id=0] Shutting down event processor.
12:06:37.028 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Shutting down. Draining the remaining events.
12:06:37.028 [share-coordinator-event-processor-0] INFO o.a.k.c.c.r.MultiThreadedEventProcessor$EventProcessorThread - [share-coordinator-event-processor-0]: Shutdown completed
12:06:37.028 [pool-67-thread-1] INFO o.a.k.c.c.r.MultiThreadedEventProcessor - [ShareCoordinator id=0] Event processor closed.
12:06:37.029 [pool-67-thread-1] INFO o.a.k.c.c.r.CoordinatorRuntime - [ShareCoordinator id=0] Coordinator runtime closed.
12:06:37.029 [pool-67-thread-1] INFO o.a.k.c.s.ShareCoordinatorService - [ShareCoordinator id=0] Shutdown complete.
12:06:37.030 [pool-67-thread-1] INFO o.a.k.q.KafkaEventQueue - [AssignmentsManager id=0]KafkaEventQueue#close: shutting down event queue.
12:06:37.030 [broker-0-directory-assignments-manager-event-handler] INFO o.a.k.s.AssignmentsManager - [AssignmentsManager id=0] shutting down.
12:06:37.030 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Shutting down
12:06:37.030 [broker-0-to-controller-directory-assignments-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Stopped
12:06:37.030 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-directory-assignments-channel-manager]: Shutdown completed
12:06:37.030 [broker-0-directory-assignments-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for directory-assignments shutdown
12:06:37.030 [pool-67-thread-1] INFO o.a.k.q.KafkaEventQueue - [AssignmentsManager id=0]closed event queue.
12:06:37.031 [pool-67-thread-1] INFO k.s.ReplicaManager - [ReplicaManager broker=0] Shutting down
12:06:37.031 [pool-67-thread-1] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Shutting down
12:06:37.031 [LogDirFailureHandler] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Stopped
12:06:37.031 [pool-67-thread-1] INFO k.s.ReplicaManager$LogDirFailureHandler - [LogDirFailureHandler]: Shutdown completed
12:06:37.032 [pool-67-thread-1] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] shutting down
12:06:37.033 [pool-67-thread-1] INFO k.s.ReplicaFetcherManager - [ReplicaFetcherManager on broker 0] shutdown completed
12:06:37.033 [pool-67-thread-1] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] shutting down
12:06:37.033 [pool-67-thread-1] INFO k.s.ReplicaAlterLogDirsManager - [ReplicaAlterLogDirsManager on broker 0] shutdown completed
1101512:06:37.033 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Shutting down
1101612:06:37.033 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Shutdown completed
1101712:06:37.033 [ExpirationReaper-0-Fetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Fetch]: Stopped
1101812:06:37.034 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Shutting down
1101912:06:37.034 [ExpirationReaper-0-RemoteFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Stopped
1102012:06:37.035 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteFetch]: Shutdown completed
1102112:06:37.035 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Shutting down
1102212:06:37.036 [ExpirationReaper-0-RemoteListOffsets] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Stopped
1102312:06:37.037 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-RemoteListOffsets]: Shutdown completed
1102412:06:37.037 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Shutting down
1102512:06:37.039 [ExpirationReaper-0-Produce] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Stopped
1102612:06:37.040 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-Produce]: Shutdown completed
1102712:06:37.040 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Shutting down
1102812:06:37.040 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Shutdown completed
1102912:06:37.040 [ExpirationReaper-0-DeleteRecords] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-DeleteRecords]: Stopped
1103012:06:37.041 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Shutting down
1103112:06:37.041 [ExpirationReaper-0-ShareFetch] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Stopped
1103212:06:37.042 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-ShareFetch]: Shutdown completed
1103312:06:37.045 [pool-67-thread-1] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Shutting down
1103412:06:37.046 [AddPartitionsToTxnSenderThread-0] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Stopped
1103512:06:37.046 [pool-67-thread-1] INFO o.a.k.s.t.AddPartitionsToTxnManager - [AddPartitionsToTxnSenderThread-0]: Shutdown completed
1103612:06:37.047 [pool-67-thread-1] INFO k.s.ReplicaManager - [ReplicaManager broker=0] Shut down completely
1103712:06:37.047 [pool-67-thread-1] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Shutting down
1103812:06:37.047 [broker-0-to-controller-alter-partition-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Stopped
1103912:06:37.048 [pool-67-thread-1] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-alter-partition-channel-manager]: Shutdown completed
1104012:06:37.048 [pool-67-thread-1] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for alter-partition shutdown
1104112:06:37.048 [pool-67-thread-1] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Shutting down
1104212:06:37.049 [broker-0-to-controller-forwarding-channel-manager] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Stopped
1104312:06:37.050 [pool-67-thread-1] INFO k.s.NodeToControllerRequestThread - [broker-0-to-controller-forwarding-channel-manager]: Shutdown completed
1104412:06:37.050 [pool-67-thread-1] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for forwarding shutdown
1104512:06:37.051 [pool-67-thread-1] INFO k.l.LogManager - Shutting down.
1104612:06:37.052 [pool-67-thread-1] INFO o.a.k.s.i.l.LogCleaner - Shutting down the log cleaner.
1104712:06:37.052 [pool-67-thread-1] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Shutting down
1104812:06:37.053 [pool-67-thread-1] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Shutdown completed
1104912:06:37.053 [kafka-log-cleaner-thread-0] INFO o.a.k.s.i.l.LogCleaner$CleanerThread - [kafka-log-cleaner-thread-0]: Stopped
1105012:06:37.058 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t3_2-0] Wrote producer snapshot at offset 3 with 1 producer ids in 2 ms.
1105112:06:37.058 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t5_2-0] Wrote producer snapshot at offset 3 with 1 producer ids in 2 ms.
1105212:06:37.061 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=__consumer_offsets-0] Wrote producer snapshot at offset 1056 with 0 producer ids in 1 ms.
1105312:06:37.061 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t2-0] Wrote producer snapshot at offset 1000 with 1 producer ids in 1 ms.
1105412:06:37.063 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t3_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
1105512:06:37.063 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t5_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 2 ms.
1105612:06:37.064 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t7_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
1105712:06:37.065 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t8_1-0] Wrote producer snapshot at offset 5 with 5 producer ids in 1 ms.
1105812:06:37.065 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t4-0] Wrote producer snapshot at offset 3 with 1 producer ids in 1 ms.
1105912:06:37.067 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 1 ms.
1106012:06:37.067 [log-closing-/tmp/kafka-logs4345019044203235659] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=t6_1-0] Wrote producer snapshot at offset 4 with 4 producer ids in 2 ms.
1106112:06:37.153 [pool-67-thread-1] INFO k.l.LogManager - Shutdown complete.
1106212:06:37.154 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Shutting down
1106312:06:37.155 [broker-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Stopped
1106412:06:37.156 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Fetch]: Shutdown completed
1106512:06:37.156 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Shutting down
1106612:06:37.156 [broker-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Stopped
1106712:06:37.157 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Produce]: Shutdown completed
1106812:06:37.157 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Shutting down
1106912:06:37.157 [broker-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Stopped
1107012:06:37.157 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-Request]: Shutdown completed
1107112:06:37.157 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Shutting down
1107212:06:37.158 [broker-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Stopped
1107312:06:37.158 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [broker-0-ThrottledChannelReaper-ControllerMutation]: Shutdown completed
1107412:06:37.160 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Shutting down socket server
1107512:06:37.177 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=BROKER, nodeId=0] Shutdown completed
1107612:06:37.178 [pool-67-thread-1] INFO o.a.k.s.l.m.BrokerTopicStats - Broker and topic stats closed
1107712:06:37.178 [pool-67-thread-1] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Shutting down
1107812:06:37.178 [share-group-lock-timeout-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Stopped
1107912:06:37.179 [pool-67-thread-1] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [share-group-lock-timeout-reaper]: Shutdown completed
1108012:06:37.180 [pool-67-thread-1] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Shutting down
1108112:06:37.181 [PersisterStateManager] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Stopped
1108212:06:37.181 [pool-67-thread-1] INFO o.a.k.s.s.p.PersisterStateManager$SendThread - [PersisterStateManager]: Shutdown completed
1108312:06:37.181 [pool-67-thread-1] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Shutting down
1108412:06:37.181 [pool-67-thread-1] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Shutdown completed
1108512:06:37.181 [persister-state-manager-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [persister-state-manager-reaper]: Stopped
1108612:06:37.183 [pool-67-thread-1] INFO o.a.k.q.KafkaEventQueue - [BrokerLifecycleManager id=0] closed event queue.
1108712:06:37.184 [pool-67-thread-1] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Shutting down
1108812:06:37.184 [client-metrics-reaper] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Stopped
1108912:06:37.184 [pool-67-thread-1] INFO o.a.k.s.u.t.SystemTimerReaper$Reaper - [client-metrics-reaper]: Shutdown completed
1109012:06:37.184 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] shut down completed
1109112:06:37.185 [pool-67-thread-1] INFO k.s.BrokerServer - [BrokerServer id=0] Transition from SHUTTING_DOWN to SHUTDOWN
1109212:06:37.185 [pool-67-thread-1] INFO k.s.ControllerServer - [ControllerServer id=0] shutting down
1109312:06:37.186 [pool-67-thread-1] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Shutting down
1109412:06:37.327 [raft-expiration-reaper] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Stopped
1109512:06:37.327 [pool-67-thread-1] INFO o.a.k.r.TimingWheelExpirationService$ExpiredOperationReaper - [raft-expiration-reaper]: Shutdown completed
1109612:06:37.328 [pool-67-thread-1] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Shutting down
1109712:06:37.328 [pool-67-thread-1] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Beginning graceful shutdown
1109812:06:37.329 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClient - [RaftManager id=0] Graceful shutdown completed
1109912:06:37.329 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [RaftManager id=0] Completed graceful shutdown of RaftClient
1110012:06:37.329 [kafka-0-raft-io-thread] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Stopped
1110112:06:37.329 [pool-67-thread-1] INFO o.a.k.r.KafkaRaftClientDriver - [kafka-0-raft-io-thread]: Shutdown completed
1110212:06:37.330 [pool-67-thread-1] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Shutting down
1110312:06:37.331 [kafka-0-raft-outbound-request-thread] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Stopped
1110412:06:37.331 [pool-67-thread-1] INFO o.a.k.r.KafkaNetworkChannel$SendThread - [kafka-0-raft-outbound-request-thread]: Shutdown completed
1110512:06:37.333 [pool-67-thread-1] INFO o.a.k.s.i.l.ProducerStateManager - [ProducerStateManager partition=__cluster_metadata-0] Wrote producer snapshot at offset 220 with 0 producer ids in 1 ms.
1110612:06:37.335 [pool-67-thread-1] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=Fx8es18YQ563VpClOcTo3w] beginShutdown: shutting down event queue.
1110712:06:37.335 [controller-0-registration-manager-event-handler] INFO k.s.ControllerRegistrationManager - [ControllerRegistrationManager id=0 incarnation=Fx8es18YQ563VpClOcTo3w] shutting down.
1110812:06:37.335 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutting down
1110912:06:37.335 [controller-0-to-controller-registration-channel-manager] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Stopped
1111012:06:37.335 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutdown completed
1111112:06:37.336 [controller-0-registration-manager-event-handler] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for registration shutdown
1111212:06:37.336 [pool-67-thread-1] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=Fx8es18YQ563VpClOcTo3w] closed event queue.
1111312:06:37.336 [pool-67-thread-1] INFO k.s.NodeToControllerRequestThread - [controller-0-to-controller-registration-channel-manager]: Shutdown completed
1111412:06:37.337 [pool-67-thread-1] WARN o.a.k.c.NetworkClient - [NodeToControllerChannelManager id=0 name=registration] Attempting to close NetworkClient that has already been closed.
1111512:06:37.337 [pool-67-thread-1] INFO k.s.NodeToControllerChannelManagerImpl - Node to controller channel manager for registration shutdown
1111612:06:37.337 [kafka-0-metadata-loader-event-handler] INFO o.a.k.q.KafkaEventQueue - [ControllerRegistrationManager id=0 incarnation=Fx8es18YQ563VpClOcTo3w] closed event queue.
1111712:06:37.339 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Stopping socket server request processors
1111812:06:37.340 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Stopped socket server request processors
1111912:06:37.341 [pool-67-thread-1] INFO o.a.k.q.KafkaEventQueue - [QuorumController id=0] QuorumController#beginShutdown: shutting down event queue.
1112012:06:37.342 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Shutting down socket server
1112112:06:37.342 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] writeNoOpRecord: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112212:06:37.342 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] maybeFenceStaleBroker: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112312:06:37.342 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] generatePeriodicPerformanceMessage: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112412:06:37.342 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] electPreferred: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112512:06:37.342 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] electUnclean: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112612:06:37.342 [quorum-controller-0-event-handler] INFO o.a.k.c.QuorumController - [QuorumController id=0] expireDelegationTokens: event unable to start processing because of RejectedExecutionException (treated as TimeoutException). Exception message: The event queue is shutting down
1112712:06:37.347 [pool-67-thread-1] INFO k.n.SocketServer - [SocketServer listenerType=CONTROLLER, nodeId=0] Shutdown completed
1112812:06:37.347 [pool-67-thread-1] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Controller 0] shutting down
1112912:06:37.349 [pool-67-thread-1] INFO k.s.KafkaRequestHandlerPool - [data-plane Kafka Request Handler on Controller 0] shut down completely
1113012:06:37.350 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutting down
1113112:06:37.350 [ExpirationReaper-0-AlterAcls] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Stopped
1113212:06:37.351 [pool-67-thread-1] INFO o.a.k.s.p.DelayedOperationPurgatory$ExpiredOperationReaper - [ExpirationReaper-0-AlterAcls]: Shutdown completed
1113312:06:37.352 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Shutting down
1113412:06:37.352 [controller-0-ThrottledChannelReaper-Fetch] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Stopped
1113512:06:37.352 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Fetch]: Shutdown completed
1113612:06:37.352 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Shutting down
1113712:06:37.352 [controller-0-ThrottledChannelReaper-Produce] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Stopped
1113812:06:37.352 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Produce]: Shutdown completed
1113912:06:37.352 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Shutting down
1114012:06:37.352 [controller-0-ThrottledChannelReaper-Request] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Stopped
1114112:06:37.353 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-Request]: Shutdown completed
1114212:06:37.353 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Shutting down
1114312:06:37.353 [pool-67-thread-1] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Shutdown completed
1114412:06:37.353 [controller-0-ThrottledChannelReaper-ControllerMutation] INFO k.s.ClientQuotaManager$ThrottledChannelReaper - [controller-0-ThrottledChannelReaper-ControllerMutation]: Stopped
1114512:06:37.353 [pool-67-thread-1] INFO o.a.k.q.KafkaEventQueue - [QuorumController id=0] closed event queue.
1114612:06:37.354 [pool-67-thread-1] INFO k.s.SharedServer - [SharedServer id=0] Stopping SharedServer
1114712:06:37.355 [pool-67-thread-1] INFO o.a.k.q.KafkaEventQueue - [MetadataLoader id=0] beginShutdown: shutting down event queue.
1114812:06:37.355 [kafka-0-metadata-loader-event-handler] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] close: shutting down event queue.
1114912:06:37.355 [kafka-0-metadata-loader-event-handler] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] closed event queue.
1115012:06:37.356 [pool-67-thread-1] INFO o.a.k.q.KafkaEventQueue - [MetadataLoader id=0] closed event queue.
1115112:06:37.357 [pool-67-thread-1] INFO o.a.k.q.KafkaEventQueue - [SnapshotGenerator id=0] closed event queue.
1115212:06:37.358 [pool-67-thread-1] INFO o.a.k.c.m.Metrics - Metrics scheduler closed
1115312:06:37.358 [pool-67-thread-1] INFO o.a.k.c.m.Metrics - Closing reporter org.apache.kafka.common.metrics.JmxReporter
1115412:06:37.358 [pool-67-thread-1] INFO o.a.k.c.m.Metrics - Metrics reporters closed
1115512:06:37.358 [pool-67-thread-1] INFO o.a.k.c.u.AppInfoParser - App info kafka.server for 0 unregistered
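
The shutdown cascade above comes from an in-process Kafka broker being torn down once the kafka module's test suite finishes: each coordinator, reaper and socket server logs its own stop. As a sketch of that fixture pattern, assuming the embedded-kafka test library (the log does not show which harness ox actually uses, so the names and ports below are illustrative):

import io.github.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}

@main def embeddedBrokerSketch(): Unit =
  // Hypothetical fixed port; real suites often pick a free port dynamically.
  given EmbeddedKafkaConfig = EmbeddedKafkaConfig(kafkaPort = 6001)
  EmbeddedKafka.withRunningKafka {
    // The broker lives only inside this block; leaving it triggers a
    // shutdown sequence like the one logged above.
    EmbeddedKafka.publishStringMessageToKafka("t1", "msg")
    assert(EmbeddedKafka.consumeFirstStringMessageFrom("t1") == "msg")
  }
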
[info] KafkaTest:
[info] source
[info] - should receive messages from a topic
[info] stage
[info] - should publish messages to a topic
[info] stage
[info] - should commit offsets of processed messages
[info] drain
[info] - should publish messages to a topic
[info] drain
[info] - should commit offsets of processed messages
[info] drain
[info] - should commit offsets using runCommit
[info] stage
[info] - should commit offsets using mapCommit
[info] stage
[info] - should commit offsets when consuming a finite stream using take

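The KafkaTest names above exercise the three sides of the ox-kafka API: a consuming source, a publishing stage/drain, and offset commits. A minimal consume-transform-publish sketch in that style, assuming ox's Flow-based kafka API as documented; the exact method names and signatures are assumptions, since this log records only the test names:

import ox.supervised
import ox.kafka.{ConsumerSettings, KafkaDrain, KafkaFlow, ProducerSettings}
import org.apache.kafka.clients.producer.ProducerRecord
import scala.util.chaining.*

@main def consumeAndPublish(): Unit =
  val consumerSettings = ConsumerSettings.default("my-group").bootstrapServers("localhost:9092")
  val producerSettings = ProducerSettings.default.bootstrapServers("localhost:9092")
  supervised {
    KafkaFlow
      .subscribe(consumerSettings, "t1")                          // "source": receive messages from a topic
      .map(in => ProducerRecord[String, String]("t2", in.value))  // transform each received message
      .pipe(KafkaDrain.runPublish(producerSettings))              // "drain": publish messages to a topic
  }

The committing variants named in the tests (runCommit, mapCommit) follow the same shape, additionally recording the consumer offset to commit once each publish succeeds.
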
************************
Build summary:
[{
  "module": "flow-reactive-streams",
  "compile": {"status": "ok", "tookMs": 19204, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 265, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 185, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "mdc-logback",
  "compile": {"status": "ok", "tookMs": 650, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 1044, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 537, "passed": 1, "failed": 0, "ignored": 0, "skipped": 0, "total": 1, "byFramework": [{"framework": "unknown", "stats": {"passed": 1, "failed": 0, "ignored": 0, "skipped": 0, "total": 1}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "core",
  "compile": {"status": "ok", "tookMs": 60, "warnings": 13, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 22298, "warnings": 20, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 149803, "passed": 795, "failed": 0, "ignored": 7, "skipped": 0, "total": 802, "byFramework": [{"framework": "unknown", "stats": {"passed": 795, "failed": 0, "ignored": 7, "skipped": 0, "total": 802}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "cron",
  "compile": {"status": "ok", "tookMs": 414, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 799, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 4299, "passed": 3, "failed": 0, "ignored": 0, "skipped": 0, "total": 3, "byFramework": [{"framework": "unknown", "stats": {"passed": 3, "failed": 0, "ignored": 0, "skipped": 0, "total": 3}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "otel-context",
  "compile": {"status": "ok", "tookMs": 274, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 159, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 150, "passed": 0, "failed": 0, "ignored": 0, "skipped": 0, "total": 0, "byFramework": []},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
},{
  "module": "kafka",
  "compile": {"status": "ok", "tookMs": 978, "warnings": 1, "errors": 0, "sourceVersion": "3.8"},
  "doc": {"status": "skipped", "tookMs": 0, "files": 0, "totalSizeKb": 0},
  "test-compile": {"status": "ok", "tookMs": 1446, "warnings": 0, "errors": 0, "sourceVersion": "3.8"},
  "test": {"status": "ok", "tookMs": 88613, "passed": 8, "failed": 0, "ignored": 0, "skipped": 0, "total": 8, "byFramework": [{"framework": "unknown", "stats": {"passed": 8, "failed": 0, "ignored": 0, "skipped": 0, "total": 8}}]},
  "publish": {"status": "skipped", "tookMs": 0},
  "metadata": {
    "crossScalaVersions": ["2.12.20"]
  }
}]
************************
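
Each entry in the summary above reports per-phase results (compile, doc, test-compile, test, publish) with durations in milliseconds. If the JSON array were captured to a file, a short sketch for tabulating per-module test results (the file name is hypothetical, and the com.lihaoyi ujson library is an arbitrary choice of parser):

import scala.io.Source
import scala.util.Using

@main def summarize(): Unit =
  // Read the captured summary array and print one line per module.
  val text = Using.resource(Source.fromFile("build-summary.json"))(_.mkString)
  for module <- ujson.read(text).arr do
    val test = module("test")
    println(s"${module("module").str}: ${test("passed").num.toInt}/${test("total").num.toInt} passed in ${test("tookMs").num.toInt} ms")
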
[success] Total time: 316 s (0:05:16.0), completed Nov 28, 2025, 12:06:37 PM
Checking patch project/plugins.sbt...
Checking patch build.sbt...
Applied patch project/plugins.sbt cleanly.
Applied patch build.sbt cleanly.