8 changes: 4 additions & 4 deletions paddle/scripts/conda_build.py
@@ -171,7 +171,7 @@ def meta_build_mac(var, python_str, paddle_version, build_var, build_name_str):
def meta_build_linux(
var, python_str, paddle_version, build_var, build_name_str, cuda_str=None
):
- if cuda_str == None:
+ if cuda_str is None:
package_str = (
"""
package:
@@ -192,7 +192,7 @@ def meta_build_linux(
)
meta_build = var.build + build_name_str
meta_str = package_str + meta_build + requirement
- if not (cuda_str == None):
+ if not (cuda_str is None):
meta_str = meta_str + cuda_str
meta_str = meta_str + var.test + var.about

@@ -209,7 +209,7 @@ def meta_build_linux(
def meta_build_windows(
var, python_str, paddle_version, blt_var, build_name_str, cuda_str=None
):
- if cuda_str == None:
+ if cuda_str is None:
package_str = (
"""
package:
@@ -235,7 +235,7 @@ def meta_build_windows(
meta_build = var.build + build_name_str
meta_str = package_str + meta_build + requirement

- if not (cuda_str == None):
+ if not (cuda_str is None):
meta_str = meta_str + cuda_str

blt_str = var.blt_const + blt_var
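The pattern throughout this PR replaces equality comparisons against `None` with identity comparisons, per PEP 8 (flake8 E711). A minimal sketch of why this matters, using an invented class rather than anything from the Paddle codebase:

```python
# Hypothetical class, only to illustrate why `is None` beats `== None`:
# `==` dispatches to __eq__, which user code can override arbitrarily,
# while `is` checks identity against the None singleton and cannot be fooled.
class AlwaysEqual:
    def __eq__(self, other):
        return True

obj = AlwaysEqual()
print(obj == None)  # True  -- __eq__ answers, and it lies
print(obj is None)  # False -- obj is not the None singleton
```

A further nit, not changed by this PR: `if not (cuda_str is None)` is behaviorally fine, but the usual spelling is `if cuda_str is not None`.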
2 changes: 1 addition & 1 deletion python/paddle/cost_model/cost_model.py
@@ -74,7 +74,7 @@ def static_cost_data(self):

def get_static_op_time(self, op_name, forward=True, dtype="float32"):
# if forward is True, return op forward time, otherwise return op backward time.
- if op_name == None:
+ if op_name is None:
raise ValueError(
'op_name should not be empty when you want to get static op time'
)
2 changes: 1 addition & 1 deletion python/paddle/dataset/imdb.py
@@ -45,7 +45,7 @@ def tokenize(pattern):
# tarfile.extractfile, which does random access and might
# destroy hard disks.
tf = tarf.next()
- while tf != None:
+ while tf is not None:
if bool(pattern.match(tf.name)):
# newline and punctuations removal and ad-hoc tokenization.
yield tarf.extractfile(tf).read().rstrip(b'\n\r').translate(
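The imdb.py loop above relies on `tarfile.TarFile.next()` returning `None` once the archive is exhausted, so identity against `None` is the natural loop test. A small self-contained sketch of that sentinel pattern (the archive contents here are made up):

```python
# Build a tiny in-memory tar archive, then walk it the same way imdb.py does:
# TarFile.next() yields TarInfo objects and returns None at end of archive.
import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as w:
    data = b"hello\n"
    info = tarfile.TarInfo(name="a.txt")
    info.size = len(data)
    w.addfile(info, io.BytesIO(data))

buf.seek(0)
with tarfile.open(fileobj=buf, mode="r") as tarf:
    tf = tarf.next()
    while tf is not None:  # sentinel check by identity, not `tf != None`
        print(tf.name, tarf.extractfile(tf).read())
        tf = tarf.next()
```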
4 changes: 2 additions & 2 deletions python/paddle/dataset/tests/imdb_test.py
@@ -31,13 +31,13 @@ class TestIMDB(unittest.TestCase):
word_idx = None

def test_build_dict(self):
- if self.word_idx == None:
+ if self.word_idx is None:
self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150)

self.assertEqual(len(self.word_idx), 7036)

def check_dataset(self, dataset, expected_size):
- if self.word_idx == None:
+ if self.word_idx is None:
self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150)

sum = 0
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/cost/base_cost.py
@@ -587,7 +587,7 @@ def get_max_beta(self, ranks):
if forward_order_beta > backward_order_beta
else backward_order_beta
)
- if max_beta == None:
+ if max_beta is None:
max_beta = beta
else:
if beta > max_beta:
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/partitioner.py
@@ -84,7 +84,7 @@ def partition(
dist_op_context.rank_id = self._rank_id

# partition startup program
- if serial_startup_program == None:
+ if serial_startup_program is None:
partitioned_startup_prog = None
else:
partitioned_startup_prog = self.partition_startup_program(
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/process_group.py
@@ -61,7 +61,7 @@ def new_process_group(ranks, group_id=None):
num_groups = len(_g_process_group_map)
# Note: our process group may interfere with the original implementation
# so the created group id should start from the original _new_ring_id()
- if group_id == None:
+ if group_id is None:
group_id = _new_ring_id() + num_groups + 1

new_pg = ProcessGroup(group_id, ranks)
@@ -530,7 +530,7 @@ def _update(self, i, trial, results):
self._finished_trials.append(trial)

cur_mertic = get_metric(results)
- if self._best_metric == None or cur_mertic > self._best_metric:
+ if self._best_metric is None or cur_mertic > self._best_metric:
self._best_metric = cur_mertic
self._best_iter = i

2 changes: 1 addition & 1 deletion python/paddle/distributed/elastic.py
@@ -31,7 +31,7 @@ def set_np(self, np):
self.etcd.put(self.np_path, '{}'.format(np).encode('latin-1'))

def scale_np(self, np):
- if self.etcd.get(self.np_path)[0] != None:
+ if self.etcd.get(self.np_path)[0] is not None:
self.set_np(np)
return True
return False
36 changes: 18 additions & 18 deletions python/paddle/distributed/fleet/base/role_maker.py
@@ -293,7 +293,7 @@ def __get_default_iface_from_gateway(self):
if "Gateway" in item and "Iface" in item:
gateway_idx = item.index("Gateway")
iface_idx = item.index("Iface")
- elif gateway_idx != None and iface_idx != None:
+ elif gateway_idx is not None and iface_idx is not None:
gateway = None
if len(item) > gateway_idx:
gateway = item[gateway_idx]
@@ -845,7 +845,7 @@ def _ps_env(self): # each role will execute it
self._server_endpoints = self._server_endpoints.split(",")

self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS", None)
- if self._worker_endpoints != None:
+ if self._worker_endpoints is not None:
self._worker_endpoints = self._worker_endpoints.split(",")
else:
self._worker_endpoints = []
@@ -860,14 +860,14 @@ def _ps_env(self): # each role will execute it
self._coordinator_endpoints = self._coordinator_endpoints.split(",")

trainers_num = os.getenv("PADDLE_TRAINERS_NUM", None)
- if trainers_num == None:
+ if trainers_num is None:
raise ValueError(
"Can not find PADDLE_TRAINERS_NUM, please check your environment."
)
trainers_num = int(trainers_num)

training_role = os.getenv("TRAINING_ROLE", None)
- if training_role == None:
+ if training_role is None:
raise ValueError(
"Can not find TRAINING_ROLE, please check your environment."
)
@@ -937,39 +937,39 @@ def _ps_env(self): # each role will execute it
if training_role == "TRAINER":
role = Role.WORKER
current_id = os.getenv("PADDLE_TRAINER_ID", None)
- if current_id == None:
+ if current_id is None:
raise ValueError(
"Can not find PADDLE_TRAINER_ID, please check your environment."
)
current_id = int(current_id)
if self._is_heter_parameter_server_mode:
self._stage_id = os.getenv("STAGE_ID", None)
- if self._stage_id == None:
+ if self._stage_id is None:
raise ValueError(
"Can not find STAGE_ID, please check your environment."
)
self._stage_id = int(self._stage_id)
self._stage_num = os.getenv("STAGE_NUM", None)
- if self._stage_num == None:
+ if self._stage_num is None:
raise ValueError(
"Can not find STAGE_NUM, please check your environment."
)
self._stage_num = int(self._stage_num)
self._stage_trainers = os.getenv(
"PADDLE_STAGE_TRAINERS_NUM", None
)
- if self._stage_trainers == None:
+ if self._stage_trainers is None:
raise ValueError(
"Can not find PADDLE_STAGE_TRAINERS_NUM, please check your environment."
)
self._stage_trainers = eval(self._stage_trainers)
cur_port = os.getenv("PADDLE_PORT", None)
- if cur_port == None:
+ if cur_port is None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment."
)
cur_ip = os.getenv("POD_IP", None)
- if cur_ip == None:
+ if cur_ip is None:
raise ValueError(
"Can not find POD_IP, please check your environment."
)
@@ -982,12 +982,12 @@ def _ps_env(self): # each role will execute it
elif training_role == "PSERVER":
role = Role.SERVER
cur_port = os.getenv("PADDLE_PORT", None)
- if cur_port == None:
+ if cur_port is None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment."
)
cur_ip = os.getenv("POD_IP", None)
- if cur_ip == None:
+ if cur_ip is None:
raise ValueError(
"Can not find POD_IP, please check your environment."
)
@@ -997,20 +997,20 @@ def _ps_env(self): # each role will execute it
elif training_role == "HETER_TRAINER":
role = Role.HETER_WORKER
self._stage_id = os.getenv("STAGE_ID", None)
- if self._stage_id == None:
+ if self._stage_id is None:
raise ValueError(
"Can not find STAGE_ID, please check your environment."
)
self._stage_id = int(self._stage_id)
self._stage_num = os.getenv("STAGE_NUM", None)
- if self._stage_num == None:
+ if self._stage_num is None:
raise ValueError(
"Can not find STAGE_NUM, please check your environment."
)
self._stage_num = int(self._stage_num)

self._stage_trainers = os.getenv("PADDLE_STAGE_TRAINERS_NUM", None)
- if self._stage_trainers == None:
+ if self._stage_trainers is None:
raise ValueError(
"Can not find PADDLE_STAGE_TRAINERS_NUM, please check your environment."
)
@@ -1019,7 +1019,7 @@ def _ps_env(self): # each role will execute it
self._heter_trainer_device_type = os.getenv(
"HETER_DEVICE_TYPE", None
)
- if self._heter_trainer_device_type == None:
+ if self._heter_trainer_device_type is None:
raise ValueError(
"Can not find HETER_DEVICE_TYPE, please check your environment."
)
@@ -1040,12 +1040,12 @@ def _ps_env(self): # each role will execute it
)

cur_port = os.getenv("PADDLE_PORT", None)
- if cur_port == None:
+ if cur_port is None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment."
)
cur_ip = os.getenv("POD_IP", None)
- if cur_ip == None:
+ if cur_ip is None:
raise ValueError(
"Can not find POD_IP, please check your environment."
)
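The role_maker hunks above validate values coming from `os.getenv`, which returns `None` only when a variable is unset; a variable that is set but empty yields `""`, which is falsy but not `None`. A short sketch of that distinction (the variable name is made up):

```python
# Unset -> None; set-but-empty -> "" (falsy, but not None), so a truthiness
# test would conflate the two cases while `is None` does not.
import os

os.environ.pop("EXAMPLE_TRAINERS_NUM", None)
print(os.getenv("EXAMPLE_TRAINERS_NUM") is None)  # True: variable is unset

os.environ["EXAMPLE_TRAINERS_NUM"] = ""
val = os.getenv("EXAMPLE_TRAINERS_NUM")
print(val is None)   # False: the variable exists, it is just empty
print(not val)       # True: `if not val` would treat it as missing
```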
6 changes: 3 additions & 3 deletions python/paddle/distributed/fleet/base/strategy_compiler.py
@@ -204,13 +204,13 @@ def generate_optimizer(
)

return_meta = (
- None if meta_optimizers == None else meta_optimizers[0]
+ None if meta_optimizers is None else meta_optimizers[0]
)
return_graph = (
- None if graph_optimizers == None else graph_optimizers[0]
+ None if graph_optimizers is None else graph_optimizers[0]
)

- if meta_optimizers == None or graph_optimizers == None:
+ if meta_optimizers is None or graph_optimizers is None:
return return_meta, return_graph

# do heuristic filter here, if any meta optimizer in graph optimizers is in
4 changes: 3 additions & 1 deletion python/paddle/distributed/fleet/base/util_factory.py
@@ -509,7 +509,9 @@ def check_not_expected_ops(prog, not_expected_op_types):
}
for each_var in saved_params:
var_temp = fluid.global_scope().find_var(each_var.name)
- assert var_temp != None, "can't not find var: " + each_var.name
+ assert var_temp is not None, (
+     "can't not find var: " + each_var.name
+ )
new_shape = (np.array(var_temp.get_tensor())).shape
assert each_var.name in orig_para_shape, (
each_var.name + "MUST in var list"
@@ -79,7 +79,7 @@ def local_iter():
batch_samples = []
line_iter = self.generate_sample(None)
for user_parsed_line in line_iter():
- if user_parsed_line == None:
+ if user_parsed_line is None:
continue
batch_samples.append(user_parsed_line)
if len(batch_samples) == self.batch_size_:
@@ -121,7 +121,7 @@ def local_iter():
for line in sys.stdin:
line_iter = self.generate_sample(line)
for user_parsed_line in line_iter():
- if user_parsed_line == None:
+ if user_parsed_line is None:
continue
batch_samples.append(user_parsed_line)
if len(batch_samples) == self.batch_size_:
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/fleet.py
@@ -1285,7 +1285,7 @@ def _minimize_impl(
context["origin_main_program"] = self.origin_main_program
context["origin_main_programs"] = [self.origin_main_program]
context["loss"] = loss
- if startup_program == None:
+ if startup_program is None:
self.origin_startup_program = (
paddle.static.default_startup_program().clone(for_test=False)
)
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/launch.py
@@ -796,7 +796,7 @@ def launch():
) # which_distributed_mode must modify args.backend
else:
assert (
- args.run_mode == 'collective' or args.run_mode == None
+ args.run_mode == 'collective' or args.run_mode is None
), "When backend is not 'auto', run mode must be collective"
check_backend(args.backend)
distribute_mode = DistributeMode.COLLECTIVE
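Note that the launch.py hunk above changes only the `None` comparison; `args.run_mode == 'collective'` correctly keeps `==`, because identity comparison is only reliable for singletons such as `None`. A quick sketch:

```python
# Equality is the right test for string values; identity is only for the None
# singleton. CPython 3.8+ even emits a SyntaxWarning for `is` with a literal.
run_mode = "".join(["coll", "ective"])  # equals 'collective', but may be a distinct object
print(run_mode == 'collective')         # True: compares values
print(run_mode is None)                 # False: identity check against the singleton
# `run_mode is 'collective'` would depend on string interning, so it is not a
# safe substitute for `==`.
```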
6 changes: 3 additions & 3 deletions python/paddle/distributed/fleet/launch_utils.py
@@ -120,7 +120,7 @@ def pods_endpoints(self):
for pod in self.pods:
ep = "{}:{}".format(pod.addr, pod.port)
assert (
- pod.port != None and pod.addr != None
+ pod.port is not None and pod.addr is not None
), "{} not a valid endpoint".format(ep)
r.append(ep)
return r
@@ -979,7 +979,7 @@ def get_custom_endpoints(origin_endpoints, offset=0):
origin_endpoint: ip:port
user_define_endpoint: ip:(port+offset)
"""
- assert origin_endpoints != None
+ assert origin_endpoints is not None
paddle_user_define_endpoints_list = []
for ip_port in origin_endpoints.split(","):
ip = ip_port.split(":")[0]
@@ -1625,7 +1625,7 @@ def get_role_endpoints(self, args):
else:
self.is_local = False
pod_ip = os.getenv("POD_IP", None)
- if pod_ip == None:
+ if pod_ip is None:
_, self.current_node_ip = get_host_name_ip()
else:
self.current_node_ip = pod_ip
@@ -269,7 +269,7 @@ def _enable_strategy(self, dist_strategy, context):
def minimize(
self, loss, startup_program=None, parameter_list=None, no_grad_set=None
):
- if startup_program == None:
+ if startup_program is None:
startup_program = paddle.static.default_startup_program()
compiled_program = self._try_to_compile(
startup_program, loss.block.program, loss
@@ -133,7 +133,7 @@ def minimize_impl(
self.inner_opt.minimize(
loss, startup_program, parameter_list, no_grad_set
)
- if startup_program == None:
+ if startup_program is None:
startup_program = paddle.static.default_startup_program()

# print("program after inner optimizer minimize:",
@@ -82,7 +82,7 @@ def build_compiled_startegy(self):
def _load_sparse_params(
self, executor, dirname, varnames, main_program=None
):
- assert vars != None
+ assert vars is not None
check_vars = []
load_prog = Program()
load_block = load_prog.global_block()