#!/usr/bin/env python
# Copyright (c) 2016, 2017 Red Hat, Inc.
# Copyright (c) 2018 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import email
import getopt
import os
import re
import sys

RETURN_CHECK_INITIAL_STATE = 0
RETURN_CHECK_STATE_WITH_RETURN = 1
RETURN_CHECK_AWAITING_BRACE = 2

__errors = 0
__warnings = 0
empty_return_check_state = 0
print_file_name = None
checking_file = False
total_line = 0
colors = False
spellcheck_comments = False
quiet = False
spell_check_dict = None


def open_spell_check_dict():
    import enchant

    try:
        extra_keywords = ['ovs', 'vswitch', 'vswitchd', 'ovs-vswitchd',
                          'netdev', 'selinux', 'ovs-ctl', 'dpctl', 'ofctl',
                          'openvswitch', 'dpdk', 'hugepage', 'hugepages',
                          'pmd', 'upcall', 'vhost', 'rx', 'tx', 'vhostuser',
                          'openflow', 'qsort', 'rxq', 'txq', 'perf', 'stats',
                          'struct', 'int', 'char', 'bool', 'upcalls', 'nicira',
                          'bitmask', 'ipv4', 'ipv6', 'tcp', 'tcp4', 'tcpv4',
                          'udp', 'udp4', 'udpv4', 'icmp', 'icmp4', 'icmpv6',
                          'vlan', 'vxlan', 'cksum', 'csum', 'checksum',
                          'ofproto', 'numa', 'mempool', 'mempools', 'mbuf',
                          'mbufs', 'hmap', 'cmap', 'smap', 'dhcpv4', 'dhcp',
                          'dhcpv6', 'opts', 'metadata', 'geneve', 'mutex',
                          'netdev', 'netdevs', 'subtable', 'virtio', 'qos',
                          'policer', 'datapath', 'tunctl', 'attr', 'ethernet',
                          'ether', 'defrag', 'defragment', 'loopback', 'sflow',
                          'acl', 'initializer', 'recirc', 'xlated', 'unclosed',
                          'netlink', 'msec', 'usec', 'nsec', 'ms', 'us', 'ns',
                          'kilobits', 'kbps', 'kilobytes', 'megabytes', 'mbps',
                          'gigabytes', 'gbps', 'megabits', 'gigabits', 'pkts',
                          'tuple', 'miniflow', 'megaflow', 'conntrack',
                          'vlans', 'vxlans', 'arg', 'tpid', 'xbundle',
                          'xbundles', 'mbundle', 'mbundles', 'netflow',
                          'localnet', 'odp', 'pre', 'dst', 'dest', 'src',
                          'ethertype', 'cvlan', 'ips', 'msg', 'msgs',
                          'liveness', 'userspace', 'eventmask', 'datapaths',
                          'slowpath', 'fastpath', 'multicast', 'unicast',
                          'revalidation', 'namespace', 'qdisc', 'uuid',
                          'ofport', 'subnet', 'revalidation', 'revalidator',
                          'revalidate', 'l2', 'l3', 'l4', 'openssl', 'mtu',
                          'ifindex', 'enum', 'enums', 'http', 'https', 'num',
                          'vconn', 'vconns', 'conn', 'nat', 'memset', 'memcmp',
                          'strcmp', 'strcasecmp', 'tc', 'ufid', 'api',
                          'ofpbuf', 'ofpbufs', 'hashmaps', 'hashmap', 'deref',
                          'dereference', 'hw', 'prio', 'sendmmsg', 'sendmsg',
                          'malloc', 'free', 'alloc', 'pid', 'ppid', 'pgid',
                          'uid', 'gid', 'sid', 'utime', 'stime', 'cutime',
                          'cstime', 'vsize', 'rss', 'rsslim', 'whcan', 'gtime',
                          'eip', 'rip', 'cgtime', 'dbg', 'gw', 'sbrec', 'bfd',
                          'sizeof', 'pmds', 'nic', 'nics', 'hwol', 'encap',
                          'decap', 'tlv', 'tlvs', 'decapsulation', 'fd',
                          'cacheline', 'xlate', 'skiplist', 'idl',
                          'comparator', 'natting', 'alg', 'pasv', 'epasv',
                          'wildcard', 'nated', 'amd64', 'x86_64',
                          'recirculation']

        global spell_check_dict
        spell_check_dict = enchant.Dict("en_US")
        for kw in extra_keywords:
            spell_check_dict.add(kw)

        return True
    except:
        return False


def get_color_end():
    global colors
    if colors:
        return "\033[00m"
    return ""


def get_red_begin():
    global colors
    if colors:
        return "\033[91m"
    return ""


def get_yellow_begin():
    global colors
    if colors:
        return "\033[93m"
    return ""


def print_error(message):
    global __errors
    print("%sERROR%s: %s" % (get_red_begin(), get_color_end(), message))

    __errors = __errors + 1


def print_warning(message):
    global __warnings
    print("%sWARNING%s: %s" % (get_yellow_begin(), get_color_end(), message))

    __warnings = __warnings + 1


def reset_counters():
    global __errors, __warnings, total_line

    __errors = 0
    __warnings = 0
    total_line = 0


# These are keywords whose names are normally followed by a space and
# something in parentheses (usually an expression) then a left curly brace.
#
# 'do' almost qualifies but it's also used as "do { ... } while (...);".
__parenthesized_constructs = 'if|for|while|switch|[_A-Z]+FOR_EACH[_A-Z]*'
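# Illustrative examples (not exhaustive): this pattern matches the keyword
# in C lines such as "if (err) {", "while (!done) {", or iteration macros
# like "HMAP_FOR_EACH (node, &map) {".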

__regex_added_line = re.compile(r'^\+{1,2}[^\+][\w\W]*')
__regex_subtracted_line = re.compile(r'^\-{1,2}[^\-][\w\W]*')
__regex_leading_with_whitespace_at_all = re.compile(r'^\s+')
__regex_leading_with_spaces = re.compile(r'^ +[\S]+')
__regex_trailing_whitespace = re.compile(r'[^\S]+$')
__regex_single_line_feed = re.compile(r'^\f$')
__regex_for_if_missing_whitespace = re.compile(r' +(%s)[\(]'
                                               % __parenthesized_constructs)
__regex_for_if_too_much_whitespace = re.compile(r' +(%s) +[\(]'
                                                % __parenthesized_constructs)
__regex_for_if_parens_whitespace = \
    re.compile(r' +(%s) \( +[\s\S]+\)' % __parenthesized_constructs)
__regex_is_for_if_single_line_bracket = \
    re.compile(r'^ +(%s) \(.*\)' % __parenthesized_constructs)
__regex_ends_with_bracket = \
    re.compile(r'[^\s]\) {(\s+/\*[\s\Sa-zA-Z0-9\.,\?\*/+-]*)?$')
__regex_ptr_declaration_missing_whitespace = re.compile(r'[a-zA-Z0-9]\*[^*]')
__regex_is_comment_line = re.compile(r'^\s*(/\*|\*\s)')
__regex_has_comment = re.compile(r'.*(/\*|\*\s)')
__regex_trailing_operator = re.compile(r'^[^ ]* [^ ]*[?:]$')
__regex_conditional_else_bracing = re.compile(r'^\s*else\s*{?$')
__regex_conditional_else_bracing2 = re.compile(r'^\s*}\selse\s*$')
__regex_has_xxx_mark = re.compile(r'.*xxx.*', re.IGNORECASE)
__regex_added_doc_rst = re.compile(
    r'\ndiff .*Documentation/.*rst\nnew file mode')
__regex_empty_return = re.compile(r'\s*return;')
__regex_if_macros = re.compile(r'^ +(%s) \([\S][\s\S]+[\S]\) { \\' %
                               __parenthesized_constructs)

skip_leading_whitespace_check = False
skip_trailing_whitespace_check = False
skip_block_whitespace_check = False
skip_signoff_check = False

# Don't enforce the character limit on files whose names match these
# patterns, as they may have legitimate reasons to have longer lines.
#
# Python isn't checked as flake8 performs these checks during build.
line_length_blacklist = re.compile(
    r'\.(am|at|etc|in|m4|mk|patch|py)$|debian/rules')
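# For instance (illustrative, not a definitive list), this matches names
# such as "Makefile.am", "tests/ofproto.at", "m4/openvswitch.m4" or
# "debian/rules", which may legitimately contain lines longer than 79 chars.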

# Don't enforce a requirement that leading whitespace be all spaces on
# files whose names match these patterns, since these kinds of files need
# lines with leading tabs.
leading_whitespace_blacklist = re.compile(r'\.(mk|am|at)$|debian/rules')


def is_subtracted_line(line):
    """Returns TRUE if the line in question has been removed."""
    return __regex_subtracted_line.search(line) is not None


def is_added_line(line):
    """Returns TRUE if the line in question is an added line.
    """
    global checking_file
    return __regex_added_line.search(line) is not None or checking_file


def added_line(line):
    """Returns the line formatted properly by removing diff syntax"""
    global checking_file
    if not checking_file:
        return line[1:]
    return line


def leading_whitespace_is_spaces(line):
    """Returns TRUE if the leading whitespace in added lines is spaces
    """
    if skip_leading_whitespace_check:
        return True
    if (__regex_leading_with_whitespace_at_all.search(line) is not None and
            __regex_single_line_feed.search(line) is None):
        return __regex_leading_with_spaces.search(line) is not None

    return True


def trailing_whitespace_or_crlf(line):
    """Returns TRUE if the trailing character is whitespace
    """
    if skip_trailing_whitespace_check:
        return False
    return (__regex_trailing_whitespace.search(line) is not None and
            __regex_single_line_feed.search(line) is None)


def if_and_for_whitespace_checks(line):
    """Return TRUE if there is appropriate whitespace after if, for, while
    """
    if skip_block_whitespace_check:
        return True
    if (__regex_for_if_missing_whitespace.search(line) is not None or
            __regex_for_if_too_much_whitespace.search(line) is not None or
            __regex_for_if_parens_whitespace.search(line)):
        return False
    return True


def if_and_for_end_with_bracket_check(line):
    """Return TRUE if there is not a bracket at the end of an if, for, while
       block which fits on a single line, i.e. 'if (foo)'"""

    def balanced_parens(line):
        """This is a rather naive counter - it won't deal with quotes"""
        balance = 0
        for letter in line:
            if letter == '(':
                balance += 1
            elif letter == ')':
                balance -= 1
        return balance == 0

    if __regex_is_for_if_single_line_bracket.search(line) is not None:
        if not balanced_parens(line):
            return True

        if __regex_ends_with_bracket.search(line) is None and \
           __regex_if_macros.match(line) is None:
            return False
    if __regex_conditional_else_bracing.match(line) is not None:
        return False
    if __regex_conditional_else_bracing2.match(line) is not None:
        return False
    return True


def pointer_whitespace_check(line):
    """Return TRUE if there is no space between a pointer name and the
       asterisk that denotes this is a pointer type, i.e. 'struct foo*'"""
    return __regex_ptr_declaration_missing_whitespace.search(line) is not None


def line_length_check(line):
    """Return TRUE if the line length is too long"""
    if len(line) > 79:
        print_warning("Line is %d characters long (recommended limit is 79)"
                      % len(line))
        return True
    return False


def is_comment_line(line):
    """Returns TRUE if the current line is part of a block comment."""
    return __regex_is_comment_line.match(line) is not None


def has_comment(line):
    """Returns TRUE if the current line contains a comment or is part of
       a block comment."""
    return __regex_has_comment.match(line) is not None


def trailing_operator(line):
    """Returns TRUE if the current line ends with an operator such as
       '?' or ':'"""
    return __regex_trailing_operator.match(line) is not None


def has_xxx_mark(line):
    """Returns TRUE if the current line contains 'xxx'."""
    return __regex_has_xxx_mark.match(line) is not None


def filter_comments(current_line, keep=False):
    """remove all of the c-style comments in a line"""
    STATE_NORMAL = 0
    STATE_COMMENT_SLASH = 1
    STATE_COMMENT_CONTENTS = 3
    STATE_COMMENT_END_SLASH = 4

    state = STATE_NORMAL
    sanitized_line = ''
    check_state = STATE_NORMAL
    only_whitespace = True

    if keep:
        check_state = STATE_COMMENT_CONTENTS

    for c in current_line:
        if c == '/':
            if state == STATE_NORMAL:
                state = STATE_COMMENT_SLASH
            elif state == STATE_COMMENT_SLASH:
                # This is for c++ style comments.  We will warn later
                return sanitized_line[:1]
            elif state == STATE_COMMENT_END_SLASH:
                c = ''
                state = STATE_NORMAL
        elif c == '*':
            if only_whitespace:
                # just assume this is a continuation from the previous line
                # as a comment
                state = STATE_COMMENT_END_SLASH
            elif state == STATE_COMMENT_SLASH:
                state = STATE_COMMENT_CONTENTS
                sanitized_line = sanitized_line[:-1]
            elif state == STATE_COMMENT_CONTENTS:
                state = STATE_COMMENT_END_SLASH
        elif state == STATE_COMMENT_END_SLASH:
            # Need to re-introduce the star from the previous state, since
            # it may have been clipped by the state check below.
            c = '*' + c
            state = STATE_COMMENT_CONTENTS
        elif state == STATE_COMMENT_SLASH:
            # Need to re-introduce the slash from the previous state, since
            # it may have been clipped by the state check below.
            c = '/' + c
            state = STATE_NORMAL

        if state != check_state:
            c = ''

        if not c.isspace():
            only_whitespace = False

        sanitized_line += c

    return sanitized_line
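# A rough sketch of the intended behavior (illustrative inputs; incidental
# whitespace and leftover comment punctuation in the result may vary):
#   filter_comments('x = 1; /* set x */')            -> 'x = 1;'
#   filter_comments('x = 1; /* set x */', keep=True) -> only the comment
#   text, which check_comment_spelling() below then splits into words.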


def check_comment_spelling(line):
    if not spell_check_dict or not spellcheck_comments:
        return False

    comment_words = filter_comments(line, True).replace(':', ' ').split(' ')
    for word in comment_words:
        skip = False
        strword = re.subn(r'\W+', '', word)[0].replace(',', '')
        if len(strword) and not spell_check_dict.check(strword.lower()):
            if any([check_char in word
                    for check_char in ['=', '(', '-', '_', '/', '\'']]):
                skip = True

            # special case the '.'
            if '.' in word and not word.endswith('.'):
                skip = True

            # skip proper nouns and references to macros
            if strword.isupper() or (strword[0].isupper() and
                                     strword[1:].islower()):
                skip = True

            # skip words that start with numbers
            if strword.startswith(tuple('0123456789')):
                skip = True

            if not skip:
                print_warning("Check for spelling mistakes (e.g. \"%s\")"
                              % strword)
                return True

    return False


def __check_doc_is_listed(text, doctype, docdir, docfile):
    if doctype == 'rst':
        beginre = re.compile(r'\+\+\+.*{}/index.rst'.format(docdir))
        docre = re.compile(r'\n\+.*{}'.format(docfile.replace('.rst', '')))
    elif doctype == 'automake':
        beginre = re.compile(r'\+\+\+.*Documentation/automake.mk')
        docre = re.compile(r'\n\+\t{}/{}'.format(docdir, docfile))
    else:
        raise NotImplementedError("Invalid doctype: {}".format(doctype))

    res = beginre.search(text)
    if res is None:
        return True

    hunkstart = res.span()[1]
    hunkre = re.compile(r'\n(---|\+\+\+) (\S+)')
    res = hunkre.search(text[hunkstart:])
    if res is None:
        hunkend = len(text)
    else:
        hunkend = hunkstart + res.span()[0]

    hunk = text[hunkstart:hunkend]
    # find if the file is being added.
    if docre.search(hunk) is not None:
        return False

    return True


def __check_new_docs(text, doctype):
    """Check if the documentation is listed properly. If doctype is 'rst' then
       the index.rst is checked. If the doctype is 'automake' then automake.mk
       is checked. Returns TRUE if the new file is not listed."""
    failed = False
    new_docs = __regex_added_doc_rst.findall(text)
    for doc in new_docs:
        docpathname = doc.split(' ')[2]
        gitdocdir, docfile = os.path.split(docpathname.rstrip('\n'))
        if docfile == "index.rst":
            continue

        if gitdocdir.startswith('a/'):
            docdir = gitdocdir.replace('a/', '', 1)
        else:
            docdir = gitdocdir

        if __check_doc_is_listed(text, doctype, docdir, docfile):
            if doctype == 'rst':
                print_warning("New doc {} not listed in {}/index.rst".format(
                              docfile, docdir))
            elif doctype == 'automake':
                print_warning("New doc {} not listed in "
                              "Documentation/automake.mk".format(docfile))
            else:
                raise NotImplementedError("Invalid doctype: {}".format(
                                          doctype))

            failed = True

    return failed


def check_doc_docs_automake(text):
    return __check_new_docs(text, 'automake')


def check_new_docs_index(text):
    return __check_new_docs(text, 'rst')


def empty_return_with_brace(line):
    """Returns TRUE if a function contains a return; followed
       by one or more line feeds and terminates with a '}'
       at start of line"""

    def empty_return(line):
        """Returns TRUE if a function has a 'return;'"""
        return __regex_empty_return.match(line) is not None

    global empty_return_check_state
    if empty_return_check_state == RETURN_CHECK_INITIAL_STATE \
       and empty_return(line):
        empty_return_check_state = RETURN_CHECK_STATE_WITH_RETURN
    elif empty_return_check_state == RETURN_CHECK_STATE_WITH_RETURN \
         and (re.match(r'^}$', line) or len(line) == 0):
        if re.match('^}$', line):
            empty_return_check_state = RETURN_CHECK_AWAITING_BRACE
        else:
            empty_return_check_state = RETURN_CHECK_INITIAL_STATE

    if empty_return_check_state == RETURN_CHECK_AWAITING_BRACE:
        empty_return_check_state = RETURN_CHECK_INITIAL_STATE
        return True

    return False
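# Roughly, this flags an added hunk where a bare "return;" is followed by a
# line holding only the closing brace, e.g.:
#     return;
# }
# in which case the "return;" can usually just be dropped.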


file_checks = [
    {'regex': __regex_added_doc_rst,
     'check': check_new_docs_index},
    {'regex': __regex_added_doc_rst,
     'check': check_doc_docs_automake}
]

checks = [
    {'regex': None,
     'match_name': lambda x: not line_length_blacklist.search(x),
     'check': lambda x: line_length_check(x)},

    {'regex': None,
     'match_name': lambda x: not leading_whitespace_blacklist.search(x),
     'check': lambda x: not leading_whitespace_is_spaces(x),
     'print': lambda: print_warning("Line has non-spaces leading whitespace")},

    {'regex': None, 'match_name': None,
     'check': lambda x: trailing_whitespace_or_crlf(x),
     'print': lambda: print_warning("Line has trailing whitespace")},

    {'regex': '(\.c|\.h)(\.in)?$', 'match_name': None,
     'prereq': lambda x: not is_comment_line(x),
     'check': lambda x: not if_and_for_whitespace_checks(x),
     'print': lambda: print_error("Improper whitespace around control block")},

    {'regex': '(\.c|\.h)(\.in)?$', 'match_name': None,
     'prereq': lambda x: not is_comment_line(x),
     'check': lambda x: not if_and_for_end_with_bracket_check(x),
     'print': lambda: print_error("Inappropriate bracing around statement")},

    {'regex': '(\.c|\.h)(\.in)?$', 'match_name': None,
     'prereq': lambda x: not is_comment_line(x),
     'check': lambda x: pointer_whitespace_check(x),
     'print':
     lambda: print_error("Inappropriate spacing in pointer declaration")},

    {'regex': '(\.c|\.h)(\.in)?$', 'match_name': None,
     'prereq': lambda x: not is_comment_line(x),
     'check': lambda x: trailing_operator(x),
     'print':
     lambda: print_error("Line has '?' or ':' operator at end of line")},

    {'regex': '(\.c|\.h)(\.in)?$', 'match_name': None,
     'prereq': lambda x: has_comment(x),
     'check': lambda x: has_xxx_mark(x),
     'print': lambda: print_warning("Comment with 'xxx' marker")},

    {'regex': '(\.c|\.h)(\.in)?$', 'match_name': None,
     'prereq': lambda x: has_comment(x),
     'check': lambda x: check_comment_spelling(x)},

    {'regex': '(\.c|\.h)(\.in)?$', 'match_name': None,
     'check': lambda x: empty_return_with_brace(x),
     'interim_line': True,
     'print':
     lambda: print_warning("Empty return followed by brace, consider omitting")
     },
]
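# A new line check can be appended with the same dictionary shape; a
# hypothetical example (not part of the real check list):
#
#   checks.append({'regex': '(\.c|\.h)(\.in)?$', 'match_name': None,
#                  'prereq': lambda x: not is_comment_line(x),
#                  'check': lambda x: 'goto ' in x,
#                  'print': lambda: print_warning("Line uses goto")})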


def regex_function_factory(func_name):
    regex = re.compile(r'\b%s\([^)]*\)' % func_name)
    return lambda x: regex.search(x) is not None
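# For example (illustrative), regex_function_factory('malloc') yields a
# predicate that is true for a line like "ptr = malloc(42);" but false for
# "ptr = xmalloc(42);", thanks to the leading \b word boundary.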


def regex_error_factory(description):
    return lambda: print_error(description)


std_functions = [
        ('malloc', 'Use xmalloc() in place of malloc()'),
        ('calloc', 'Use xcalloc() in place of calloc()'),
        ('realloc', 'Use xrealloc() in place of realloc()'),
        ('strdup', 'Use xstrdup() in place of strdup()'),
        ('asprintf', 'Use xasprintf() in place of asprintf()'),
        ('vasprintf', 'Use xvasprintf() in place of vasprintf()'),
        ('strcpy', 'Use ovs_strlcpy() in place of strcpy()'),
        ('strlcpy', 'Use ovs_strlcpy() in place of strlcpy()'),
        ('strncpy', 'Use ovs_strzcpy() in place of strncpy()'),
        ('strerror', 'Use ovs_strerror() in place of strerror()'),
        ('sleep', 'Use xsleep() in place of sleep()'),
        ('abort', 'Use ovs_abort() in place of abort()'),
        ('assert', 'Use ovs_assert() in place of assert()'),
        ('error', 'Use ovs_error() in place of error()'),
]
checks += [
    {'regex': '(\.c|\.h)(\.in)?$',
     'match_name': None,
     'prereq': lambda x: not is_comment_line(x),
     'check': regex_function_factory(function_name),
     'print': regex_error_factory(description)}
    for (function_name, description) in std_functions]


def regex_operator_factory(operator):
    regex = re.compile(r'^[^#][^"\']*[^ "]%s[^ "\'][^"]*' % operator)
    return lambda x: regex.search(filter_comments(x)) is not None


infix_operators = \
    [re.escape(op) for op in ['%', '<<', '>>', '<=', '>=', '==', '!=',
        '^', '|', '&&', '||', '?:', '=', '+=', '-=', '*=', '/=', '%=',
        '&=', '^=', '|=', '<<=', '>>=']] \
    + ['[^<" ]<[^=" ]', '[^->" ]>[^=" ]', '[^ !()/"]\*[^/]', '[^ !&()"]&',
       '[^" +(]\+[^"+;]', '[^" -(]-[^"->;]', '[^" <>=!^|+\-*/%&]=[^"=]',
       '[^* ]/[^* ]']
checks += [
    {'regex': '(\.c|\.h)(\.in)?$', 'match_name': None,
     'prereq': lambda x: not is_comment_line(x),
     'check': regex_operator_factory(operator),
     'print': lambda: print_warning("Line lacks whitespace around operator")}
    for operator in infix_operators]
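# As a rough illustration, these checks warn on an added line such as
# "    x = y<<2;", where an infix operator has no surrounding whitespace;
# comments are stripped first, and the leading [^#][^"\'] guards are meant
# to skip preprocessor lines and quoted text.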


def get_file_type_checks(filename):
    """Returns the list of checks for a file based on matching the filename
       against regex."""
    global checks
    checkList = []
    for check in checks:
        if check['regex'] is None and check['match_name'] is None:
            checkList.append(check)
        if check['regex'] is not None and \
           re.compile(check['regex']).search(filename) is not None:
            checkList.append(check)
        elif check['match_name'] is not None and check['match_name'](filename):
            checkList.append(check)
    return checkList


def run_checks(current_file, line, lineno):
    """Runs the various checks for the particular line. This will take
       filename into account."""
    global checking_file, total_line
    print_line = False
    for check in get_file_type_checks(current_file):
        if 'prereq' in check and not check['prereq'](line):
            continue
        if check['check'](line):
            if 'print' in check:
                check['print']()
            print_line = True

    if print_line:
        if checking_file:
            print("%s:%d:" % (current_file, lineno))
        else:
            print("#%d FILE: %s:%d:" % (total_line, current_file, lineno))
        print("%s\n" % line)


def interim_line_check(current_file, line, lineno):
    """Runs the various checks for the particular interim line. This will
       take filename into account, and will check for the 'interim_line'
       key before running the check."""
    global checking_file, total_line
    print_line = False
    for check in get_file_type_checks(current_file):
        if 'prereq' in check and not check['prereq'](line):
            continue
        if 'interim_line' in check and check['interim_line']:
            if check['check'](line):
                if 'print' in check:
                    check['print']()
                print_line = True

    if print_line:
        if checking_file:
            print("%s:%d:" % (current_file, lineno))
        else:
            print("#%d FILE: %s:%d:" % (total_line, current_file, lineno))
        print("%s\n" % line)


def run_file_checks(text):
    """Runs the various checks for the text."""
    for check in file_checks:
        if check['regex'].search(text) is not None:
            check['check'](text)


def ovs_checkpatch_parse(text, filename, author=None, committer=None):
    global print_file_name, total_line, checking_file, \
        empty_return_check_state

    PARSE_STATE_HEADING = 0
    PARSE_STATE_DIFF_HEADER = 1
    PARSE_STATE_CHANGE_BODY = 2

    lineno = 0
    signatures = []
    co_authors = []
    parse = 0
    current_file = filename if checking_file else ''
    previous_file = ''
    seppatch = re.compile(r'^---([\w]*| \S+)$')
    hunks = re.compile('^(---|\+\+\+) (\S+)')
    hunk_differences = re.compile(
        r'^@@ ([0-9-+]+),([0-9-+]+) ([0-9-+]+),([0-9-+]+) @@')
    is_author = re.compile(r'^(Author|From): (.*)$', re.I | re.M | re.S)
    is_committer = re.compile(r'^(Commit: )(.*)$', re.I | re.M | re.S)
    is_signature = re.compile(r'^(Signed-off-by: )(.*)$',
                              re.I | re.M | re.S)
    is_co_author = re.compile(r'^(Co-authored-by: )(.*)$',
                              re.I | re.M | re.S)
    is_gerrit_change_id = re.compile(r'(\s*(change-id: )(.*))$',
                                     re.I | re.M | re.S)

    reset_counters()

    for line in text.split('\n'):
        if current_file != previous_file:
            previous_file = current_file

        lineno = lineno + 1
        total_line = total_line + 1
        if len(line) <= 0:
            continue

        if checking_file:
            parse = PARSE_STATE_CHANGE_BODY

        if parse == PARSE_STATE_DIFF_HEADER:
            match = hunks.match(line)
            if match:
                parse = PARSE_STATE_CHANGE_BODY
                current_file = match.group(2)[2:]
                print_file_name = current_file
            continue
        elif parse == PARSE_STATE_HEADING:
            if seppatch.match(line):
                parse = PARSE_STATE_DIFF_HEADER
                if not skip_signoff_check:

                    # Check that the patch has an author, that the
                    # author is not among the co-authors, and that the
                    # co-authors are unique.
                    if not author:
                        print_error("Patch lacks author.")
                        continue
                    if " via " in author or "@openvswitch.org" in author:
                        print_error("Author should not be mailing list.")
                        continue
                    if author in co_authors:
                        print_error("Author should not also be co-author.")
                        continue
                    if len(set(co_authors)) != len(co_authors):
                        print_error("Duplicate co-author.")

                    # Check that the author, all co-authors, and the
                    # committer (if any) signed off.
                    if author not in signatures:
                        print_error("Author %s needs to sign off." % author)
                    for ca in co_authors:
                        if ca not in signatures:
                            print_error("Co-author %s needs to sign off." % ca)
                            break
                    if (committer
                        and author != committer
                        and committer not in signatures):
                        print_error("Committer %s needs to sign off."
                                    % committer)

                    # Check for signatures that we do not expect.
                    # This is only a warning because there can be,
                    # rarely, a signature chain.
                    #
                    # If we don't have a known committer, and there is
                    # a single extra sign-off, then do not warn
                    # because that extra sign-off is probably the
                    # committer.
                    extra_sigs = [x for x in signatures
                                  if x not in co_authors
                                  and x != author
                                  and x != committer]
                    if len(extra_sigs) > 1 or (committer and extra_sigs):
                        print_warning("Unexpected sign-offs from developers "
                                      "who are not authors or co-authors or "
                                      "committers: %s"
                                      % ", ".join(extra_sigs))
            elif is_committer.match(line):
                committer = is_committer.match(line).group(2)
            elif is_author.match(line):
                author = is_author.match(line).group(2)
            elif is_signature.match(line):
                m = is_signature.match(line)
                signatures.append(m.group(2))
            elif is_co_author.match(line):
                m = is_co_author.match(line)
                co_authors.append(m.group(2))
            elif is_gerrit_change_id.match(line):
                print_error(
                    "Remove Gerrit Change-Id's before submitting upstream.")
                print("%d: %s\n" % (lineno, line))
        elif parse == PARSE_STATE_CHANGE_BODY:
            newfile = hunks.match(line)
            if newfile:
                current_file = newfile.group(2)[2:]
                print_file_name = current_file
                continue
            reset_line_number = hunk_differences.match(line)
            if reset_line_number:
                empty_return_check_state = RETURN_CHECK_INITIAL_STATE
                lineno = int(reset_line_number.group(3))
                if lineno < 0:
                    lineno = -1 * lineno
                lineno -= 1

            if is_subtracted_line(line):
                lineno -= 1
                continue

            cmp_line = added_line(line)

            if not is_added_line(line):
                interim_line_check(current_file, cmp_line, lineno)
                continue

            # Skip files which have /datapath in them, since they follow
            # the Linux or Windows coding standards.
            if current_file.startswith('datapath'):
                continue
            if current_file.startswith('include/linux'):
                continue
            run_checks(current_file, cmp_line, lineno)

    run_file_checks(text)
    if __errors or __warnings:
        return -1
    return 0


def usage():
    print("""\
Open vSwitch checkpatch.py
Checks a patch for trivial mistakes.
usage:
%s [options] [PATCH1 [PATCH2 ...] | -f SOURCE1 [SOURCE2 ...] | -1 | -2 | ...]

Input options:
-f|--check-file                Arguments are source files, not patches.
-1, -2, ...                    Check recent commits in this repo.

Check options:
-h|--help                      This help message
-b|--skip-block-whitespace     Skips the if/while/for whitespace tests
-l|--skip-leading-whitespace   Skips the leading whitespace test
-q|--quiet                     Only print error and warning information
-s|--skip-signoff-lines        Tolerate missing Signed-off-by line
-S|--spellcheck-comments       Check C comments for possible spelling mistakes
-t|--skip-trailing-whitespace  Skips the trailing whitespace test"""
          % sys.argv[0])


def ovs_checkpatch_print_result(result):
    global quiet, __warnings, __errors, total_line

    if result < 0:
        print("Lines checked: %d, Warnings: %d, Errors: %d\n" %
              (total_line, __warnings, __errors))
    elif not quiet:
        print("Lines checked: %d, no obvious problems found\n" % (total_line))


def ovs_checkpatch_file(filename):
    try:
        mail = email.message_from_file(open(filename, 'r'))
    except:
        print_error("Unable to parse file '%s'. Is it a patch?" % filename)
        return -1

    for part in mail.walk():
        if part.get_content_maintype() == 'multipart':
            continue
        result = ovs_checkpatch_parse(part.get_payload(decode=False), filename,
                                      mail.get('Author', mail['From']),
                                      mail['Commit'])
        ovs_checkpatch_print_result(result)
        return result


def partition(pred, iterable):
    """Returns ([trues], [falses]), where [trues] is the items in
       'iterable' that satisfy 'pred' and [falses] is all the rest."""
    trues = []
    falses = []
    for item in iterable:
        if pred(item):
            trues.append(item)
        else:
            falses.append(item)
    return trues, falses
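# Illustrative use (mirrors the option parsing below):
#   partition(lambda s: re.match('-[0-9]+$', s), ['-2', '-f', 'a.patch'])
#   returns (['-2'], ['-f', 'a.patch'])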


if __name__ == '__main__':
    try:
        numeric_options, args = partition(lambda s: re.match('-[0-9]+$', s),
                                          sys.argv[1:])
        n_patches = int(numeric_options[-1][1:]) if numeric_options else 0

        optlist, args = getopt.getopt(args, 'bhlstfSq',
                                      ["check-file",
                                       "help",
                                       "skip-block-whitespace",
                                       "skip-leading-whitespace",
                                       "skip-signoff-lines",
                                       "skip-trailing-whitespace",
                                       "spellcheck-comments",
                                       "quiet"])
    except:
        print("Unknown option encountered. Please rerun with -h for help.")
        sys.exit(-1)

    for o, a in optlist:
        if o in ("-h", "--help"):
            usage()
            sys.exit(0)
        elif o in ("-b", "--skip-block-whitespace"):
            skip_block_whitespace_check = True
        elif o in ("-l", "--skip-leading-whitespace"):
            skip_leading_whitespace_check = True
        elif o in ("-s", "--skip-signoff-lines"):
            skip_signoff_check = True
        elif o in ("-t", "--skip-trailing-whitespace"):
            skip_trailing_whitespace_check = True
        elif o in ("-f", "--check-file"):
            checking_file = True
        elif o in ("-S", "--spellcheck-comments"):
            if not open_spell_check_dict():
                print("WARNING: The enchant library isn't available.")
                print("         Please install python enchant.")
            else:
                spellcheck_comments = True
        elif o in ("-q", "--quiet"):
            quiet = True
        else:
            print("Unknown option '%s'" % o)
            sys.exit(-1)

    if sys.stdout.isatty():
        colors = True

    if n_patches:
        status = 0

        git_log = 'git log --no-color --no-merges --pretty=format:"%H %s" '
        with os.popen(git_log + '-%d' % n_patches, 'r') as f:
            commits = f.read().split("\n")

        for i in reversed(range(0, n_patches)):
            revision, name = commits[i].split(" ", 1)
            f = os.popen('''git format-patch -1 --stdout --pretty=format:"\
Author: %an <%ae>
Commit: %cn <%ce>
Subject: %s

%b" ''' + revision, 'r')
            patch = f.read()
            f.close()

            if not quiet:
                print('== Checking %s ("%s") ==' % (revision[0:12], name))
            result = ovs_checkpatch_parse(patch, revision)
            ovs_checkpatch_print_result(result)
            if result:
                status = -1
        sys.exit(status)

    if not args:
        if sys.stdin.isatty():
            usage()
            sys.exit(-1)
        result = ovs_checkpatch_parse(sys.stdin.read(), '-')
        ovs_checkpatch_print_result(result)
        sys.exit(result)

    status = 0
    for filename in args:
        if not quiet:
            print('== Checking "%s" ==' % filename)
        result = ovs_checkpatch_file(filename)
        if result:
            status = -1
    sys.exit(status)