backend()->suspend() below returns new Login1SuspendJob() or the other backend but it is never freed. KJob *suspendJob = 0; switch ((Mode) (args["Type"].toUInt())) { case ToRamMode: suspendJob = backend()->suspend(PowerDevil::BackendInterface::ToRam); break; case ToDiskMode: suspendJob = backend()->suspend(PowerDevil::BackendInterface::ToDisk); break; case SuspendHybridMode: suspendJob = backend()->suspend(PowerDevil::BackendInterface::HybridSuspend); break; case ShutdownMode: KWorkSpace::requestShutDown(KWorkSpace::ShutdownConfirmNo, KWorkSpace::ShutdownTypeHalt); break; case LogoutDialogMode: KWorkSpace::requestShutDown(KWorkSpace::ShutdownConfirmYes); break; case LockScreenMode: lockScreenAndWait(); break; default: break; } if (suspendJob) { suspendJob->start(); } } valgrind trace for that: ==9271== 664 (40 direct, 624 indirect) bytes in 1 blocks are definitely lost in loss record 11,298 of 12,046 ==9271== at 0x4C2BCA7: operator new(unsigned long) (in /usr/lib64/valgrind/vgpreload_memcheck-amd64-linux.so) ==9271== by 0x1C43435D: PowerDevilUPowerBackend::suspend(PowerDevil::BackendInterface::SuspendMethod) (powerdevilupowerbackend.cpp:284) ==9271== by 0x1C67DCE5: PowerDevil::BundledActions::SuspendSession::triggerImpl(QMap<QString, QVariant> const&) (suspendsession.cpp:99) ==9271== by 0x1C662399: PowerDevil::Action::trigger(QMap<QString, QVariant> const&) (powerdevilaction.cpp:100) ==9271== by 0x1C66AF36: PowerDevil::Core::triggerSuspendSession(unsigned int) (powerdevilcore.cpp:792) ==9271== by 0x1C66C134: PowerDevil::Core::qt_static_metacall(QObject*, QMetaObject::Call, int, void**) (powerdevilcore.moc:141) ==9271== by 0x66621AE: QMetaObject::activate(QObject*, QMetaObject const*, int, void**) (qobject.cpp:3548) ==9271== by 0x6B84661: QAction::triggered(bool) (moc_qaction.cpp:277) ==9271== by 0x6B8484F: QAction::activate(QAction::ActionEvent) (qaction.cpp:1257) ==9271== by 0x59AD310: ??? 
(in /usr/lib64/libkdeui.so.5.10.0) ==9271== by 0x66621AE: QMetaObject::activate(QObject*, QMetaObject const*, int, void**) (qobject.cpp:3548) ==9271== by 0x5AAC8F8: ??? (in /usr/lib64/libkdeui.so.5.10.0) ==9271== by 0x5AACD63: ??? (in /usr/lib64/libkdeui.so.5.10.0) ==9271== by 0x5AAD3CE: ??? (in /usr/lib64/libkdeui.so.5.10.0) ==9271== by 0x6276D89: QDBusConnectionPrivate::deliverCall(QObject*, int, QDBusMessage const&, QList<int> const&, int) (qdbusintegrator.cpp:951) ==9271== by 0x666168D: QObject::event(QEvent*) (qobject.cpp:1203) ==9271== by 0x6B8A86B: QApplicationPrivate::notify_helper(QObject*, QEvent*) (qapplication.cpp:4562) ==9271== by 0x6B8ECEA: QApplication::notify(QObject*, QEvent*) (qapplication.cpp:4423) ==9271== by 0x5968CB5: KApplication::notify(QObject*, QEvent*) (in /usr/lib64/libkdeui.so.5.10.0) ==9271== by 0x664CC9D: QCoreApplication::notifyInternal(QObject*, QEvent*) (qcoreapplication.cpp:946) ==9271== by 0x6650600: QCoreApplicationPrivate::sendPostedEvents(QObject*, int, QThreadData*) (qcoreapplication.h:231) ==9271== by 0x667B042: postEventSourceDispatch(_GSource*, int (*)(void*), void*) (qcoreapplication.h:236) ==9271== by 0xA8BD7D4: g_main_context_dispatch (gmain.c:2715) ==9271== by 0xA8BDB07: g_main_context_iterate.isra.24 (gmain.c:3290) ==9271== by 0xA8BDBC3: g_main_context_iteration (gmain.c:3351) ==9271== by 0x667B1D5: QEventDispatcherGlib::processEvents(QFlags<QEventLoop::ProcessEventsFlag>) (qeventdispatcher_glib.cpp:424) ==9271== by 0x6C2AC1D: QGuiEventDispatcherGlib::processEvents(QFlags<QEventLoop::ProcessEventsFlag>) (qguieventdispatcher_glib.cpp:204) ==9271== by 0x664B9EE: QEventLoop::processEvents(QFlags<QEventLoop::ProcessEventsFlag>) (qeventloop.cpp:149) ==9271== by 0x664BC77: QEventLoop::exec(QFlags<QEventLoop::ProcessEventsFlag>) (qeventloop.cpp:204) ==9271== by 0x6650917: QCoreApplication::exec() (qcoreapplication.cpp:1218) ==9271== by 0x4E3BA4F: kdemain (kded.cpp:924) ==9271== by 0x5065A14: (below main) (libc-start.c:258) 
Reproducible: Always
There is also this for: connect(watcher, SIGNAL(finished(QDBusPendingCallWatcher*)), this, SLOT(sendResult(QDBusPendingCallWatcher*))); which suggests sendResult is never called to destroy Login1SuspendJob? ==9271== 664 (80 direct, 584 indirect) bytes in 1 blocks are definitely lost in loss record 11,299 of 12,046 ==9271== at 0x4C2C27B: malloc (in /usr/lib64/valgrind/vgpreload_memcheck-amd64-linux.so) ==9271== by 0x6666EB1: QVector<QObjectPrivate::ConnectionList>::realloc(int, int) (qvector.h:405) ==9271== by 0x666384B: QObjectPrivate::addConnection(int, QObjectPrivate::Connection*) (qvector.h:343) ==9271== by 0x6664EA4: QMetaObjectPrivate::connect(QObject const*, int, QObject const*, int, QMetaObject const*, int, int*) (qobject.cpp:3196) ==9271== by 0x6666394: QObject::connect(QObject const*, char const*, QObject const*, char const*, Qt::ConnectionType) (qobject.cpp:2654) ==9271== by 0x1C433FF5: Login1SuspendJob::doStart() (login1suspendjob.cpp:64) ==9271== by 0x666168D: QObject::event(QEvent*) (qobject.cpp:1203) ==9271== by 0x6B8A86B: QApplicationPrivate::notify_helper(QObject*, QEvent*) (qapplication.cpp:4562)
And this, line 63 is the alloc: QDBusPendingCallWatcher *watcher = new QDBusPendingCallWatcher(reply, this); ==12345== 1,328 (80 direct, 1,248 indirect) bytes in 2 blocks are definitely lost in loss record 11,618 of 12,115 ==12345== at 0x4C2C27B: malloc (in /usr/lib64/valgrind/vgpreload_memcheck-amd64-linux.so) ==12345== by 0x656E895: QListData::detach_grow(int*, int) (qlist.cpp:85) ==12345== by 0x663214A: QList<QObject*>::detach_helper_grow(int, int) (qlist.h:679) ==12345== by 0x6632240: QList<QObject*>::append(QObject* const&) (qlist.h:510) ==12345== by 0x66600BD: QObjectPrivate::setParent_helper(QObject*) (qobject.cpp:1955) ==12345== by 0x66602E9: QObject::QObject(QObjectPrivate&, QObject*) (qobject.cpp:782) ==12345== by 0x62B079A: QDBusPendingCallWatcher::QDBusPendingCallWatcher(QDBusPendingCall const&, QObject*) (qdbuspendingcall.cpp:494) ==12345== by 0x1C433FD9: Login1SuspendJob::doStart() (login1suspendjob.cpp:63)
Oh, crap, this is a downstream (openSUSE) bug.
(In reply to comment #3) > Oh, crap, this is a downstream (openSUSE) bug. https://bugzilla.novell.com/show_bug.cgi?id=808567
(In reply to comment #4) > (In reply to comment #3) > > Oh, crap, this is a downstream (openSUSE) bug. > > https://bugzilla.novell.com/show_bug.cgi?id=808567 Which stems from a patch which is taken from kde... So I think this is still an issue, but not with 4.10. And fedora has that patch in 4.10 included too.
My machine is an Intel Core 2 Duo desktop running Fedora 17 x86_64. The machine can sleep and hibernate correctly, and it also puts the monitor to sleep correctly via DVI (and in the past via VGA). My problem is that kded4 grows in resident memory and consumes virtual memory (swap) too, until I kill and restart kded4 — and then the cycle begins again. The memory growth happens without sleeping or hibernating the machine. The problem happened with KDE 4.9.x and 4.10, and currently happens with 4.10.1. I only restart after 7 or more days of uptime, sometimes 40 days; I have to kill kded4 several times, but besides that the OS and KDE work perfectly. I also have a netbook (Fedora 17 i686, KDE 4.9.2 and now 4.10.1) and it doesn't have the problem — it sleeps, hibernates, and turns off the LCD correctly. Thanks, gabrielo
Gabriel, can you report a new bug, or rather add your feedback at https://bugs.kde.org/show_bug.cgi?id=312092 ? Since the memory growth happens without sleeping the computer, it is not related to this bug.
Git commit 7a01d55b55591fb3d73281541116b36e72f49e24 by Lukáš Tinkl. Committed on 12/03/2013 at 17:01. Pushed by lukas into branch 'KDE/4.10'. stop leaking the login1 suspend job FIXED-IN: 4.10.2 M +8 -0 powerdevil/daemon/backends/upower/login1suspendjob.cpp M +1 -0 powerdevil/daemon/backends/upower/login1suspendjob.h http://commits.kde.org/kde-workspace/7a01d55b55591fb3d73281541116b36e72f49e24
This fixes only the login1 job; is the upower one not leaking?
void UPowerSuspendJob::resumeDone() { emitResult(); }
(In reply to comment #8) > Git commit 7a01d55b55591fb3d73281541116b36e72f49e24 by Lukáš Tinkl. > Committed on 12/03/2013 at 17:01. > Pushed by lukas into branch 'KDE/4.10'. > > stop leaking the login1 suspend job > FIXED-IN: 4.10.2 > > M +8 -0 powerdevil/daemon/backends/upower/login1suspendjob.cpp > M +1 -0 powerdevil/daemon/backends/upower/login1suspendjob.h > > http://commits.kde.org/kde-workspace/7a01d55b55591fb3d73281541116b36e72f49e24 This won't help. The signal is connected already at: https://projects.kde.org/projects/kde/kde-workspace/repository/revisions/7a01d55b55591fb3d73281541116b36e72f49e24/entry/powerdevil/daemon/backends/upower/powerdevilupowerbackend.cpp#L170 The thing is I have systemd < 198 and PrepareForSleep from login1 is never called, because upower is used for suspending...
(In reply to comment #11) > The thing is I have systemd < 198 and PrepareForSleep from login1 is never > called, because upower is used for suspending... Other way round, actually. Suspending is done by login1, but in that case the login1 PrepareForSleep signal is not issued. So it is not the Resuming signal from upower... In other words, I think upower should be used on systems with systemd < 198, and login1 not used at all.
That makes sense (using upower for anything < 198)
We are now only using the signal for systemd < 198. Thanks for all the feedback !
Hopefully this is it: https://projects.kde.org/projects/kde/kde-workspace/repository/revisions/d56bf9cf8ae4b7fa84abeb005c9ebd467909dd49/diff