/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/slab.h>
#include "ar-internal.h"

/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						     struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	unsigned int seq = 0;

	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				goto done;
			conn = NULL;
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));

done:
	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}

/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	write_seqlock_bh(&peer->service_conn_lock);

	pp = &peer->service_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock_bh(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
	if (atomic_read(&cursor->usage) == 0)
		goto replace_old_connection;
	write_sequnlock_bh(&peer->service_conn_lock);
	/* We should not be able to get here.  rxrpc_incoming_connection() is
	 * called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch. */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}
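
/*
 * Illustrative sketch (not part of the rxrpc logic above): the lockless
 * lookup in rxrpc_find_service_conn_rcu() relies on the reader/retry pattern
 * from <linux/seqlock.h>.  A minimal, self-contained version of that pattern,
 * protecting a hypothetical pair of values instead of an rbtree, might look
 * like the code below.  The names (ex_lock, ex_a, ex_b, ex_read_pair,
 * ex_update_pair) are invented for illustration and do not exist in rxrpc.
 */
#if 0 /* example only, never compiled */
static DEFINE_SEQLOCK(ex_lock);
static u64 ex_a, ex_b;

/* Read both values consistently: try locklessly first, then retry with the
 * lock held if a writer raced with us.
 */
static void ex_read_pair(u64 *a, u64 *b)
{
	int seq = 0;

	do {
		read_seqbegin_or_lock(&ex_lock, &seq);
		*a = ex_a;
		*b = ex_b;
	} while (need_seqretry(&ex_lock, seq));
	done_seqretry(&ex_lock, seq);
}

/* Writers bump the sequence count around the update so that lockless readers
 * notice the change and retry.
 */
static void ex_update_pair(u64 a, u64 b)
{
	write_seqlock_bh(&ex_lock);
	ex_a = a;
	ex_b = b;
	write_sequnlock_bh(&ex_lock);
}
#endif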
/*
 * Preallocate a service connection.  The connection is placed on the proc and
 * reap lists so that we don't have to get the lock from BH context.
 */
struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);

	if (conn) {
		/* We maintain an extra ref on the connection whilst it is on
		 * the rxrpc_connections list.
		 */
		conn->state = RXRPC_CONN_SERVICE_PREALLOC;
		atomic_set(&conn->usage, 2);

		write_lock(&rxrpc_connection_lock);
		list_add_tail(&conn->link, &rxrpc_connections);
		list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
		write_unlock(&rxrpc_connection_lock);

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage),
				 __builtin_return_address(0));
	}

	return conn;
}

/*
 * Set up an incoming connection.  This is called in BH context with the RCU
 * read lock held.
 */
void rxrpc_new_incoming_connection(struct rxrpc_connection *conn,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("");

	conn->proto.epoch	= sp->hdr.epoch;
	conn->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
	conn->params.service_id	= sp->hdr.serviceId;
	conn->security_ix	= sp->hdr.securityIndex;
	conn->out_clientflag	= 0;
	if (conn->security_ix)
		conn->state	= RXRPC_CONN_SERVICE_UNSECURED;
	else
		conn->state	= RXRPC_CONN_SERVICE;

	/* Make the connection a target for incoming packets. */
	rxrpc_publish_service_conn(conn->params.peer, conn);

	_net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
}

/*
 * Remove the service connection from the peer's tree, thereby removing it as
 * a target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer = conn->params.peer;

	write_seqlock_bh(&peer->service_conn_lock);
	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
		rb_erase(&conn->service_node, &peer->service_conns);
	write_sequnlock_bh(&peer->service_conn_lock);
}
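
/*
 * For reference (an illustrative sketch, not a definition used by this file):
 * the single index_key comparisons in the tree walks above work because the
 * epoch and connection ID are packed into one 64-bit key, so one integer
 * compare orders both fields at once.  The authoritative layout of
 * struct rxrpc_conn_proto lives in ar-internal.h; a simplified sketch of that
 * kind of packing looks roughly like the struct below, where ex_conn_key is
 * an invented name.
 */
#if 0 /* example only, never compiled */
struct ex_conn_key {
	union {
		struct {
			u32	epoch;		/* RX epoch chosen by the client */
			u32	cid;		/* connection ID, call bits masked off */
		};
		u64	index_key;		/* compares epoch and cid in one go */
	};
};
#endif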