@@ -86,7 +86,8 @@ def __init__(self, sparkContext, sqlContext=None):
8686 >>> df.registerTempTable("allTypes")
8787 >>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
8888 ... 'from allTypes where b and i > 0').collect()
89- [Row(c0=2, c1=2.0, c2=False, c3=2, c4=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
89+ [Row(_c0=2, _c1=2.0, _c2=False, _c3=2, _c4=0, \
90+ time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
9091 >>> df.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
9192 [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
9293 """
@@ -176,17 +177,17 @@ def registerFunction(self, name, f, returnType=StringType()):
176177
177178 >>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
178179 >>> sqlContext.sql("SELECT stringLengthString('test')").collect()
179-    [Row(c0=u'4')]
180+    [Row(_c0=u'4')]
180181
181182 >>> from pyspark.sql.types import IntegerType
182183 >>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
183184 >>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
184-    [Row(c0=4)]
185+    [Row(_c0=4)]
185186
186187 >>> from pyspark.sql.types import IntegerType
187188 >>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
188189 >>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
189-    [Row(c0=4)]
190+    [Row(_c0=4)]
190191 """
191192    func = lambda _, it: map(lambda x: f(*x), it)
192193    ser = AutoBatchedSerializer(PickleSerializer())
0 commit comments