6 changes: 1 addition & 5 deletions python/pyspark/sql/functions.py
@@ -975,7 +975,7 @@ def date_format(date, format):
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of `datetime pattern`_ can be used.

-.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
+.. _datetime pattern: /docs/latest/sql-ref-datetime-pattern.html
Member:

@yaooqinn you can just use the http://... links.

Seems the relative links work in your local build but don't work when it's released, because the documentation root is rebased.

Member Author:

Thanks, fixed.

.. note:: Use whenever possible specialized functions like `year`. These benefit from a
specialized implementation.
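
For illustration, a doctest-style sketch of the pattern described above (the input row and column name are invented):

>>> df = spark.createDataFrame([('1993-03-18',)], ['dt'])  # illustrative data
>>> df.select(date_format(df.dt, 'dd.MM.yyyy').alias('d')).collect()
[Row(d='18.03.1993')]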

@@ -1196,8 +1196,6 @@ def to_date(col, format=None):
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
is omitted. Equivalent to ``col.cast("date")``.

-.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html

>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
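
A sketch of the same call with an explicit format, assuming the `format` parameter takes the datetime pattern referenced above:

>>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]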
@@ -1221,8 +1219,6 @@ def to_timestamp(col, format=None):
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
is omitted. Equivalent to ``col.cast("timestamp")``.

-.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html

>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
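
Likewise for `to_timestamp`, a sketch with an explicit format:

>>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]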
10 changes: 2 additions & 8 deletions python/pyspark/sql/readwriter.py
@@ -253,8 +253,8 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
:param recursiveFileLookup: recursively scan a directory for files. Using this option
disables `partition discovery`_.

-.. _partition discovery: /sql-data-sources-parquet.html#partition-discovery
-.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
+.. _partition discovery: /docs/latest/sql-data-sources-parquet.html#partition-discovery
+.. _datetime pattern: /docs/latest/sql-ref-datetime-pattern.html

>>> df1 = spark.read.json('python/test_support/sql/people.json')
>>> df1.dtypes
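
A minimal sketch of the option described above, assuming `recursiveFileLookup` is also accepted as a keyword argument (the path is the fixture already used in this docstring):

>>> df = spark.read.json('python/test_support/sql/people.json', recursiveFileLookup=True)  # kwarg assumed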
@@ -490,8 +490,6 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non
:param recursiveFileLookup: recursively scan a directory for files. Using this option
disables `partition discovery`_.

-.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html

>>> df = spark.read.csv('python/test_support/sql/ages.csv')
>>> df.dtypes
[('_c0', 'string'), ('_c1', 'string')]
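
The same option can presumably be set through `option()` as well; a sketch reusing the fixture path above:

>>> df = spark.read.option('recursiveFileLookup', True).csv('python/test_support/sql/ages.csv')  # option() spelling assumed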
@@ -865,8 +863,6 @@ def json(self, path, mode=None, compression=None, dateFormat=None, timestampForm
:param ignoreNullFields: Whether to ignore null fields when generating JSON objects.
If None is set, it uses the default value, ``true``.

-.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html

>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
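
A sketch of the `ignoreNullFields` parameter documented above, assuming it is exposed as a keyword argument:

>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'), ignoreNullFields=False)  # kwarg assumed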
@@ -981,8 +977,6 @@ def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=No
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``. Maximum length is 1 character.

-.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html

>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
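
Similarly, a sketch exercising the `lineSep` parameter documented above (keyword form assumed):

>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'), lineSep='\n')  # kwarg assumed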
6 changes: 2 additions & 4 deletions python/pyspark/sql/streaming.py
@@ -489,8 +489,8 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
:param recursiveFileLookup: recursively scan a directory for files. Using this option
disables `partition discovery`_.

-.. _partition discovery: /sql-data-sources-parquet.html#partition-discovery
-.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
+.. _partition discovery: /docs/latest/sql-data-sources-parquet.html#partition-discovery
+.. _datetime pattern: /docs/latest/sql-ref-datetime-pattern.html

>>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema)
>>> json_sdf.isStreaming
@@ -725,8 +725,6 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non
:param recursiveFileLookup: recursively scan a directory for files. Using this option
disables `partition discovery`_.

-.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html

>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema)
>>> csv_sdf.isStreaming
True
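
For the streaming reader too, a sketch assuming `recursiveFileLookup` is exposed as a keyword argument:

>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema=sdf_schema, recursiveFileLookup=True)  # kwarg assumed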