mirror of
https://github.com/Dasharo/skiboot.git
synced 2026-03-06 14:50:44 -08:00
49496485fe
SPDX makes it a simpler diff. I have audited the commit history of each file to ensure that they are exclusively authored by IBM and thus we have the right to relicense. The motivation behind this is twofold: 1) We want to enable experiments with coreboot, which is GPLv2 licensed 2) An upcoming firmware component wants to incorporate code from skiboot and code from the Linux kernel, which is GPLv2 licensed. I have gone through the IBM internal way of gaining approval for this. The following files are not exclusively authored by IBM, so are *not* included in this update (I will be seeking approval from contributors): core/direct-controls.c core/flash.c core/pcie-slot.c external/common/arch_flash_unknown.c external/common/rules.mk external/gard/Makefile external/gard/rules.mk external/opal-prd/Makefile external/pflash/Makefile external/xscom-utils/Makefile hdata/vpd.c hw/dts.c hw/ipmi/ipmi-watchdog.c hw/phb4.c include/cpu.h include/phb4.h include/platform.h libflash/libffs.c libstb/mbedtls/sha512.c libstb/mbedtls/sha512.h platforms/astbmc/barreleye.c platforms/astbmc/garrison.c platforms/astbmc/mihawk.c platforms/astbmc/nicole.c platforms/astbmc/p8dnu.c platforms/astbmc/p8dtu.c platforms/astbmc/p9dsu.c platforms/astbmc/vesnin.c platforms/rhesus/ec/config.h platforms/rhesus/ec/gpio.h platforms/rhesus/gpio.c platforms/rhesus/rhesus.c platforms/astbmc/talos.c platforms/astbmc/romulus.c Signed-off-by: Stewart Smith <stewart@linux.ibm.com> [oliver: fixed up the drift] Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
107 lines
2.8 KiB
C
107 lines
2.8 KiB
C
// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
|
/* Copyright 2013-2019 IBM Corp. */
|
|
|
|
#ifndef __LOCK_H
|
|
#define __LOCK_H
|
|
|
|
#include <stdbool.h>
|
|
#include <processor.h>
|
|
#include <cmpxchg.h>
|
|
#include <ccan/list/list.h>
|
|
#include <ccan/str/str.h>
|
|
|
|
#ifdef DEBUG_LOCKS_BACKTRACE
|
|
#include <stack.h>
|
|
|
|
#define LOCKS_BACKTRACE_MAX_ENTS 60
|
|
#endif
|
|
|
|
struct lock {
	/* Lock value has bit 63 as lock bit and the PIR of the owner
	 * in the top 32-bit
	 */
	uint64_t lock_val;

	/*
	 * Set to true if lock is involved in the console flush path
	 * in which case taking it will suspend console flushing
	 */
	bool in_con_path;

	/* file/line of lock owner (set from LOCK_CALLER at lock time) */
	const char *owner;

#ifdef DEBUG_LOCKS_BACKTRACE
	/* Backtrace storage for lock debugging; only present when
	 * DEBUG_LOCKS_BACKTRACE is enabled at build time.
	 */
	struct bt_entry bt_buf[LOCKS_BACKTRACE_MAX_ENTS];
	struct bt_metadata bt_metadata;
#endif

	/* linkage in per-cpu list of owned locks */
	struct list_node list;
};
|
|
|
|
/* Initializer... not ideal but works for now. If we need different
 * values for the fields and/or start getting warnings we'll have to
 * play macro tricks.
 *
 * Zero-initializes every field: lock_val == 0 means "not held".
 */
#define LOCK_UNLOCKED { 0 }
|
|
|
/* Note vs. libc and locking:
 *
 * The printf() family of
 * functions use stack based temporary buffers and call into skiboot's
 * underlying read() and write() which use a console lock.
 *
 * The underlying FSP console code will thus operate within that
 * console lock.
 *
 * The libc does *NOT* lock stream buffer operations, so don't
 * try to scanf() from the same FILE from two different processors.
 *
 * FSP operations are locked using an FSP lock, so all processors
 * can safely call the FSP API
 *
 * Note about ordering:
 *
 * lock() is a full memory barrier. unlock() is a lwsync
 *
 */

/* NOTE(review): presumably bypasses all locking when set (e.g. in
 * crash/abort paths) — confirm against the implementation in
 * core/lock.c.
 */
extern bool bust_locks;
|
|
|
|
static inline void init_lock(struct lock *l)
|
|
{
|
|
*l = (struct lock)LOCK_UNLOCKED;
|
|
}
|
|
|
|
/* Expands to a "file:line" string literal identifying the call site;
 * passed through to the *_caller() variants for lock-owner tracking.
 */
#define LOCK_CALLER __FILE__ ":" stringify(__LINE__)

/* Convenience wrappers that record the caller's location automatically */
#define try_lock(l) try_lock_caller(l, LOCK_CALLER)
#define lock(l) lock_caller(l, LOCK_CALLER)
#define lock_recursive(l) lock_recursive_caller(l, LOCK_CALLER)
|
|
|
|
/* Non-blocking acquire; "caller" is the file:line of the lock site
 * (normally supplied via the try_lock() wrapper).
 */
extern bool try_lock_caller(struct lock *l, const char *caller);
/* Blocking acquire; see Note about ordering above for barrier semantics */
extern void lock_caller(struct lock *l, const char *caller);
extern void unlock(struct lock *l);

extern bool lock_held_by_me(struct lock *l);

/* The debug output can happen while holding the FSP lock, so we need
 * some kind of recursive lock support here. I don't want all locks to
 * be recursive though, thus the caller need to explicitly call
 * lock_recursive which returns false if the lock was already held by
 * this cpu. If it returns true, then the caller shall release it when
 * done.
 */
extern bool lock_recursive_caller(struct lock *l, const char *caller);

/* Called after per-cpu data structures are available */
extern void init_locks(void);

/* Dump the list of locks held by this CPU */
extern void dump_locks_list(void);

/* Clean all locks held by CPU (and warn if any) */
extern void drop_my_locks(bool warn);
|
|
|
|
#endif /* __LOCK_H */
|